refactor bufferstream to use a queue (#346)
This change modifies how the backpressure algorithm in bufferstream
works - in particular, instead of working byte-by-byte, it will now work
seq-by-seq.
When data arrives, it usually does so in packets - in the current
bufferstream, the packet is read then split into bytes which are fed one
by one to the bufferstream. On the reading side, the bytes are popped of
the bufferstream, again byte by byte, to satisfy `readOnce` requests -
this introduces a lot of synchronization traffic because the checks for
full buffer and for async event handling must be done for every byte.
In this PR, a queue of length 1 is used instead - this means there will
at most exist one "packet" in `pushTo`, one in the queue and one in the
slush buffer that is used to store incomplete reads.
* avoid byte-by-byte copy to buffer, with synchronization in-between
* reuse AsyncQueue synchronization logic instead of rolling own
* avoid writeHandler callback - implement `write` method instead
* simplify EOF signalling by only setting EOF flag in queue reader (and
reset)
* remove BufferStream pipes (unused)
* fixes drainBuffer deadlock when drain is called from within read loop
and thus blocks draining
* fix lpchannel init order
2020-09-10 06:19:13 +00:00
|
|
|
import unittest
|
2020-06-03 02:21:11 +00:00
|
|
|
import chronos, stew/byteutils
|
2020-06-19 17:29:43 +00:00
|
|
|
import ../libp2p/stream/bufferstream,
|
2020-11-23 15:07:11 +00:00
|
|
|
../libp2p/stream/lpstream,
|
|
|
|
../libp2p/errors
|
2019-09-01 21:51:39 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
import ./helpers
|
|
|
|
|
refactor bufferstream to use a queue (#346)
This change modifies how the backpressure algorithm in bufferstream
works - in particular, instead of working byte-by-byte, it will now work
seq-by-seq.
When data arrives, it usually does so in packets - in the current
bufferstream, the packet is read then split into bytes which are fed one
by one to the bufferstream. On the reading side, the bytes are popped of
the bufferstream, again byte by byte, to satisfy `readOnce` requests -
this introduces a lot of synchronization traffic because the checks for
full buffer and for async event handling must be done for every byte.
In this PR, a queue of length 1 is used instead - this means there will
at most exist one "packet" in `pushTo`, one in the queue and one in the
slush buffer that is used to store incomplete reads.
* avoid byte-by-byte copy to buffer, with synchronization in-between
* reuse AsyncQueue synchronization logic instead of rolling own
* avoid writeHandler callback - implement `write` method instead
* simplify EOF signalling by only setting EOF flag in queue reader (and
reset)
* remove BufferStream pipes (unused)
* fixes drainBuffer deadlock when drain is called from within read loop
and thus blocks draining
* fix lpchannel init order
2020-09-10 06:19:13 +00:00
|
|
|
{.used.}
|
2019-10-29 18:51:48 +00:00
|
|
|
|
2019-09-01 21:51:39 +00:00
|
|
|
suite "BufferStream":
  teardown:
    # Debug aid, left disabled:
    # echo getTracker(BufferStreamTrackerName).dump()

    # After each test, the stream tracker must report no leaked
    # BufferStream instances.
    check getTracker(BufferStreamTrackerName).isLeaked() == false
|
2020-04-21 01:24:42 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
asyncTest "push data to buffer":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
var data = "12345"
|
|
|
|
await buff.pushData(data.toBytes())
|
|
|
|
check buff.len == 5
|
|
|
|
await buff.close()
|
2020-04-21 01:24:42 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
asyncTest "push and wait":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
2019-09-01 21:51:39 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
let fut0 = buff.pushData("1234".toBytes())
|
|
|
|
let fut1 = buff.pushData("5".toBytes())
|
|
|
|
check buff.len == 4 # the second write should not be visible yet
|
2020-04-21 01:24:42 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
var data: array[1, byte]
|
|
|
|
check: 1 == await buff.readOnce(addr data[0], data.len)
|
2019-09-01 21:51:39 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
check ['1'] == string.fromBytes(data)
|
|
|
|
await fut0
|
|
|
|
await fut1
|
|
|
|
check buff.len == 4
|
|
|
|
await buff.close()
|
|
|
|
|
|
|
|
asyncTest "read with size":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
await buff.pushData("12345".toBytes())
|
|
|
|
var data: array[3, byte]
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check ['1', '2', '3'] == string.fromBytes(data)
|
|
|
|
await buff.close()
|
|
|
|
|
|
|
|
asyncTest "readExactly":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
await buff.pushData("12345".toBytes())
|
|
|
|
check buff.len == 5
|
|
|
|
var data: array[2, byte]
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == ['1', '2']
|
|
|
|
await buff.close()
|
|
|
|
|
|
|
|
asyncTest "readExactly raises":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
await buff.pushData("123".toBytes())
|
|
|
|
var data: array[5, byte]
|
|
|
|
var readFut = buff.readExactly(addr data[0], data.len)
|
|
|
|
await buff.close()
|
|
|
|
|
|
|
|
expect LPStreamIncompleteError:
|
|
|
|
await readFut
|
|
|
|
|
|
|
|
asyncTest "readOnce":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
var data: array[3, byte]
|
|
|
|
let readFut = buff.readOnce(addr data[0], data.len)
|
|
|
|
await buff.pushData("123".toBytes())
|
|
|
|
check buff.len == 3
|
|
|
|
|
|
|
|
check (await readFut) == 3
|
|
|
|
check string.fromBytes(data) == ['1', '2', '3']
|
|
|
|
await buff.close()
|
|
|
|
|
|
|
|
asyncTest "reads should happen in order":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
proc writer1() {.async.} =
|
|
|
|
await buff.pushData("Msg 1".toBytes())
|
|
|
|
await buff.pushData("Msg 2".toBytes())
|
|
|
|
await buff.pushData("Msg 3".toBytes())
|
2020-11-13 03:44:02 +00:00
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
let writerFut1 = writer1()
|
2020-11-13 03:44:02 +00:00
|
|
|
var data: array[5, byte]
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
|
|
|
|
check string.fromBytes(data) == "Msg 1"
|
|
|
|
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == "Msg 2"
|
|
|
|
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == "Msg 3"
|
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
await writerFut1
|
2020-11-13 03:44:02 +00:00
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
proc writer2() {.async.} =
|
|
|
|
await buff.pushData("Msg 4".toBytes())
|
|
|
|
await buff.pushData("Msg 5".toBytes())
|
|
|
|
await buff.pushData("Msg 6".toBytes())
|
2020-11-13 03:44:02 +00:00
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
let writerFut2 = writer2()
|
2020-11-13 03:44:02 +00:00
|
|
|
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == "Msg 4"
|
|
|
|
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == "Msg 5"
|
|
|
|
|
|
|
|
await buff.readExactly(addr data[0], data.len)
|
|
|
|
check string.fromBytes(data) == "Msg 6"
|
2020-11-23 15:07:11 +00:00
|
|
|
|
|
|
|
await buff.close()
|
|
|
|
await writerFut2
|
2020-11-13 03:44:02 +00:00
|
|
|
|
|
|
|
asyncTest "small reads":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
var str: string
|
2020-11-23 15:07:11 +00:00
|
|
|
proc writer() {.async.} =
|
|
|
|
for i in 0..<10:
|
|
|
|
await buff.pushData("123".toBytes())
|
|
|
|
str &= "123"
|
|
|
|
await buff.close() # all data should still be read after close
|
2020-11-13 03:44:02 +00:00
|
|
|
|
|
|
|
var str2: string
|
|
|
|
|
2020-11-23 15:07:11 +00:00
|
|
|
proc reader() {.async.} =
|
|
|
|
var data: array[2, byte]
|
|
|
|
expect LPStreamEOFError:
|
|
|
|
while true:
|
|
|
|
let x = await buff.readOnce(addr data[0], data.len)
|
|
|
|
str2 &= string.fromBytes(data[0..<x])
|
|
|
|
|
|
|
|
|
|
|
|
await allFuturesThrowing(
|
|
|
|
allFinished(reader(), writer()))
|
2020-11-13 03:44:02 +00:00
|
|
|
check str == str2
|
|
|
|
await buff.close()
|
|
|
|
|
2020-11-17 14:59:25 +00:00
|
|
|
asyncTest "read all data after eof":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
await buff.pushData("12345".toBytes())
|
|
|
|
var data: array[2, byte]
|
|
|
|
check: (await buff.readOnce(addr data[0], data.len)) == 2
|
|
|
|
|
|
|
|
await buff.pushEof()
|
|
|
|
|
|
|
|
check:
|
|
|
|
not buff.atEof()
|
|
|
|
(await buff.readOnce(addr data[0], data.len)) == 2
|
|
|
|
not buff.atEof()
|
|
|
|
(await buff.readOnce(addr data[0], data.len)) == 1
|
|
|
|
buff.atEof()
|
|
|
|
# exactly one 0-byte read
|
|
|
|
(await buff.readOnce(addr data[0], data.len)) == 0
|
|
|
|
|
|
|
|
expect LPStreamEOFError:
|
|
|
|
discard (await buff.readOnce(addr data[0], data.len))
|
|
|
|
|
|
|
|
await buff.close() # all data should still be read after close
|
|
|
|
|
|
|
|
asyncTest "read more data after eof":
|
|
|
|
let buff = newBufferStream()
|
|
|
|
check buff.len == 0
|
|
|
|
|
|
|
|
await buff.pushData("12345".toBytes())
|
|
|
|
var data: array[5, byte]
|
|
|
|
check: (await buff.readOnce(addr data[0], 1)) == 1 # 4 bytes in readBuf
|
|
|
|
|
|
|
|
await buff.pushEof()
|
|
|
|
|
|
|
|
check:
|
|
|
|
not buff.atEof()
|
|
|
|
(await buff.readOnce(addr data[0], 1)) == 1 # 3 bytes in readBuf, eof marker processed
|
|
|
|
not buff.atEof()
|
|
|
|
(await buff.readOnce(addr data[0], data.len)) == 3 # 0 bytes in readBuf
|
|
|
|
buff.atEof()
|
|
|
|
# exactly one 0-byte read
|
|
|
|
(await buff.readOnce(addr data[0], data.len)) == 0
|
|
|
|
|
|
|
|
expect LPStreamEOFError:
|
|
|
|
discard (await buff.readOnce(addr data[0], data.len))
|
|
|
|
|
|
|
|
await buff.close() # all data should still be read after close
|
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
asyncTest "shouldn't get stuck on close":
|
|
|
|
var stream = newBufferStream()
|
|
|
|
var
|
|
|
|
fut = stream.pushData(toBytes("hello"))
|
|
|
|
fut2 = stream.pushData(toBytes("again"))
|
|
|
|
await stream.close()
|
2020-11-23 15:07:11 +00:00
|
|
|
|
|
|
|
# Both writes should be completed on close (technically, the should maybe
|
|
|
|
# be cancelled, at least the second one...
|
|
|
|
check await fut.withTimeout(100.milliseconds)
|
|
|
|
check await fut2.withTimeout(100.milliseconds)
|
2020-11-13 03:44:02 +00:00
|
|
|
|
|
|
|
await stream.close()
|
|
|
|
|
|
|
|
asyncTest "no push after close":
|
|
|
|
var stream = newBufferStream()
|
|
|
|
await stream.pushData("123".toBytes())
|
|
|
|
var data: array[3, byte]
|
|
|
|
await stream.readExactly(addr data[0], data.len)
|
|
|
|
await stream.close()
|
2019-09-01 21:51:39 +00:00
|
|
|
|
2020-11-13 03:44:02 +00:00
|
|
|
expect LPStreamEOFError:
|
2020-09-21 17:48:19 +00:00
|
|
|
await stream.pushData("123".toBytes())
|
2020-11-23 15:07:11 +00:00
|
|
|
|
|
|
|
asyncTest "no concurrent pushes":
|
|
|
|
var stream = newBufferStream()
|
|
|
|
await stream.pushData("123".toBytes())
|
|
|
|
let push = stream.pushData("123".toBytes())
|
|
|
|
|
|
|
|
expect AssertionError:
|
|
|
|
await stream.pushData("123".toBytes())
|
|
|
|
|
|
|
|
await stream.closeWithEOF()
|
|
|
|
await push
|