# nim-libp2p/libp2p/stream/bufferstream.nim

## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

## This module implements an asynchronous buffer stream
## which emulates physical async IO.
##
## The stream is based on the standard library's `Deque`,
## which is itself based on a ring buffer.
##
## It works by exposing a regular LPStream interface and
## a method ``pushTo`` to push data to the internal read
## buffer, as well as a handler that can be registered
## and gets triggered on every write to the stream. This
## allows using the buffered stream as a sort of proxy,
## which can be consumed as a regular LPStream but also
## allows injecting data for reads and intercepting writes.
##
## Another notable feature is that the stream is fully
## ordered and asynchronous. Reads are queued up in order
## and are suspended when not enough data is available. This
## preserves backpressure while maintaining full
## asynchrony. Both writing to the internal buffer with
## ``pushTo`` and reading with the ``read*`` methods
## will suspend until either the number of elements in the
## buffer drops below ``maxSize`` or more data becomes available.
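##
## A minimal usage sketch (illustrative only; it assumes chronos'
## ``waitFor`` is available at the call site and the handler name is
## made up for the example):
##
## .. code-block:: nim
##
##   proc logWrites(data: seq[byte]) {.async.} =
##     # fired on every write to the stream
##     echo "handler got ", data.len, " bytes"
##
##   let stream = newBufferStream(logWrites)
##   waitFor stream.pushTo(@[1.byte, 2, 3]) # fill the internal read buffer
##   let bytes = waitFor stream.read(3)     # drain the buffered bytes
##   waitFor stream.write(bytes)            # routed through ``logWrites``
##   waitFor stream.close()
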
import deques, math
import chronos
import ../stream/lpstream
const DefaultBufferSize* = 1024
type
# TODO: figure out how to make this generic to avoid casts
WriteHandler* = proc (data: seq[byte]): Future[void]
BufferStream* = ref object of LPStream
    maxSize*: int # buffer's max size in bytes
    readBuf: Deque[byte] # ring-buffer-based deque used as the backing store
    readReqs: Deque[Future[void]] # pending reads, completed in FIFO order
dataReadEvent: AsyncEvent
writeHandler*: WriteHandler
lock: AsyncLock
isPiped: bool
AlreadyPipedError* = object of CatchableError
NotWritableError* = object of CatchableError
proc newAlreadyPipedError*(): ref Exception {.inline.} =
result = newException(AlreadyPipedError, "stream already piped")
proc newNotWritableError*(): ref Exception {.inline.} =
result = newException(NotWritableError, "stream is not writable")
proc requestReadBytes(s: BufferStream): Future[void] =
## create a future that will complete when more
## data becomes available in the read buffer
result = newFuture[void]()
s.readReqs.addLast(result)
proc initBufferStream*(s: BufferStream,
handler: WriteHandler = nil,
size: int = DefaultBufferSize) =
s.maxSize = if isPowerOfTwo(size): size else: nextPowerOfTwo(size)
s.readBuf = initDeque[byte](s.maxSize)
s.readReqs = initDeque[Future[void]]()
s.dataReadEvent = newAsyncEvent()
s.lock = newAsyncLock()
s.writeHandler = handler
s.closeEvent = newAsyncEvent()
proc newBufferStream*(handler: WriteHandler = nil,
size: int = DefaultBufferSize): BufferStream =
new result
result.initBufferStream(handler, size)
proc popFirst*(s: BufferStream): byte =
result = s.readBuf.popFirst()
s.dataReadEvent.fire()
proc popLast*(s: BufferStream): byte =
result = s.readBuf.popLast()
s.dataReadEvent.fire()
proc shrink(s: BufferStream, fromFirst = 0, fromLast = 0) =
s.readBuf.shrink(fromFirst, fromLast)
s.dataReadEvent.fire()
proc len*(s: BufferStream): int = s.readBuf.len
proc pushTo*(s: BufferStream, data: seq[byte]) {.async.} =
  ## Write bytes to the internal read buffer; use this to fill up the
  ## buffer with data.
##
## This method is async and will wait until all data has been
## written to the internal buffer; this is done so that backpressure
## is preserved.
##
try:
await s.lock.acquire()
var index = 0
while true:
      # give readers a chance to free up the buffer
      # if it's full.
if s.readBuf.len >= s.maxSize:
await sleepAsync(1.millis)
while index < data.len and s.readBuf.len < s.maxSize:
s.readBuf.addLast(data[index])
inc(index)
# resolve the next queued read request
if s.readReqs.len > 0:
s.readReqs.popFirst().complete()
if index >= data.len:
return
      # if we couldn't transfer all the data to the
      # internal buf, wait on a read event
await s.dataReadEvent.wait()
s.dataReadEvent.clear()
finally:
s.lock.release()
method read*(s: BufferStream, n = -1): Future[seq[byte]] {.async.} =
  ## Read all bytes (``n <= 0``) or exactly ``n`` bytes from the buffer.
  ##
  ## This procedure allocates a new ``seq[byte]`` and returns it as the result.
  ##
var size = if n > 0: n else: s.readBuf.len()
var index = 0
while index < size:
while s.readBuf.len() > 0 and index < size:
result.add(s.popFirst())
inc(index)
if index < size:
await s.requestReadBytes()
method readExactly*(s: BufferStream,
pbytes: pointer,
nbytes: int):
Future[void] {.async.} =
  ## Read exactly ``nbytes`` bytes from stream ``s`` and store
  ## them in ``pbytes``.
##
## If EOF is received and ``nbytes`` is not yet read, the procedure
## will raise ``LPStreamIncompleteError``.
##
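  ## A short sketch (the stream and data below are illustrative only,
  ## and ``waitFor`` from chronos is assumed to be in scope):
  ##
  ## .. code-block:: nim
  ##
  ##   let s = newBufferStream()
  ##   waitFor s.pushTo(@[10.byte, 20, 30, 40])
  ##   var buf: array[4, byte]
  ##   waitFor s.readExactly(addr buf[0], buf.len)
  ##   # buf now holds [10, 20, 30, 40]
  ##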
var buff = await s.read(nbytes)
if nbytes > buff.len():
raise newLPStreamIncompleteError()
copyMem(pbytes, addr buff[0], nbytes)
method readLine*(s: BufferStream,
limit = 0,
sep = "\r\n"):
Future[string] {.async.} =
  ## Read one line from stream ``s``, where a "line" is a
  ## sequence of bytes ending with ``sep`` (default is ``"\r\n"``).
  ##
  ## If EOF is received and ``sep`` was not found, the method will return the
  ## partially read bytes.
  ##
  ## If EOF was received and the internal buffer is empty, an
  ## empty string is returned.
  ##
  ## If ``limit`` is greater than 0, the result string will be limited to
  ## ``limit`` bytes.
##
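  ## A short sketch (illustrative only; the cast mirrors how this module
  ## converts strings to bytes elsewhere, and ``waitFor`` is assumed):
  ##
  ## .. code-block:: nim
  ##
  ##   let s = newBufferStream()
  ##   var text = "hi\r\nthere"
  ##   waitFor s.pushTo(cast[seq[byte]](text))
  ##   let line = waitFor s.readLine()
  ##   # ``line`` == "hi"; "there" remains buffered
  ##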
result = ""
var lim = if limit <= 0: -1 else: limit
var state = 0
  var index = 0
while index < s.readBuf.len:
let ch = char(s.readBuf[index])
if sep[state] == ch:
inc(state)
if state == len(sep):
s.shrink(index + 1)
break
else:
state = 0
result.add(ch)
if len(result) == lim:
s.shrink(index + 1)
break
inc(index)
method readOnce*(s: BufferStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
  ## Perform one read operation on stream ``s``.
  ##
  ## If the internal buffer is not empty, up to ``nbytes`` bytes will be
  ## transferred from it; otherwise the call waits until some bytes are received.
##
if s.readBuf.len == 0:
await s.requestReadBytes()
var len = if nbytes > s.readBuf.len: s.readBuf.len else: nbytes
await s.readExactly(pbytes, len)
result = len
method readUntil*(s: BufferStream,
pbytes: pointer,
nbytes: int,
sep: seq[byte]):
Future[int] {.async.} =
  ## Read data from stream ``s`` until the separator ``sep`` is
  ## found.
##
## On success, the data and separator will be removed from the internal
## buffer (consumed). Returned data will include the separator at the end.
##
  ## If EOF is received and ``sep`` was not found, the procedure will raise
  ## ``LPStreamIncompleteError``.
  ##
  ## If ``nbytes`` bytes have been received and ``sep`` was not found, the
  ## procedure will raise ``LPStreamLimitError``.
  ##
  ## The procedure returns the actual number of bytes read.
##
var
dest = cast[ptr UncheckedArray[byte]](pbytes)
state = 0
k = 0
let datalen = s.readBuf.len()
  if datalen == 0:
raise newLPStreamIncompleteError()
var index = 0
while index < datalen:
let ch = s.readBuf[index]
if sep[state] == ch:
inc(state)
else:
state = 0
if k < nbytes:
dest[k] = ch
inc(k)
else:
raise newLPStreamLimitError()
if state == len(sep):
break
inc(index)
if state == len(sep):
s.shrink(index + 1)
result = k
else:
s.shrink(datalen)
method write*(s: BufferStream,
pbytes: pointer,
nbytes: int): Future[void] =
  ## Write ``nbytes`` bytes from the memory pointed to by ``pbytes`` to the
  ## stream, passing the data to the registered ``writeHandler``.
  ##
var buf: seq[byte] = newSeq[byte](nbytes)
copyMem(addr buf[0], pbytes, nbytes)
if not isNil(s.writeHandler):
result = s.writeHandler(buf)
method write*(s: BufferStream,
msg: string,
msglen = -1): Future[void] =
  ## Write string ``msg`` of length ``msglen`` to the stream.
  ##
  ## String ``msg`` must not be zero-length.
  ##
  ## If ``msglen < 0`` the whole string ``msg`` will be written to the stream.
  ## If ``msglen > len(msg)`` only ``len(msg)`` bytes will be written to
  ## the stream.
  ##
var buf = ""
shallowCopy(buf, if msglen > 0: msg[0..<msglen] else: msg)
if not isNil(s.writeHandler):
result = s.writeHandler(cast[seq[byte]](buf))
method write*(s: BufferStream,
msg: seq[byte],
msglen = -1): Future[void] =
  ## Write the sequence of bytes ``msg`` of length ``msglen`` to
  ## the stream.
  ##
  ## Sequence ``msg`` must not be zero-length.
  ##
  ## If ``msglen < 0`` the whole sequence ``msg`` will be written to the stream.
  ## If ``msglen > len(msg)`` only ``len(msg)`` bytes will be written to
  ## the stream.
  ##
var buf: seq[byte]
shallowCopy(buf, if msglen > 0: msg[0..<msglen] else: msg)
if not isNil(s.writeHandler):
result = s.writeHandler(buf)
proc pipe*(s: BufferStream,
target: BufferStream): BufferStream =
  ## Pipe the write end of this stream to
  ## be the source of the target stream.
  ##
  ## Note that this only works with the LPStream
  ## interface: the ``read*`` and ``write`` methods
  ## are piped.
##
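  ## A rough sketch of chaining two streams (illustrative only; assumes
  ## chronos' ``waitFor`` at the call site):
  ##
  ## .. code-block:: nim
  ##
  ##   let a = newBufferStream()
  ##   let b = newBufferStream()
  ##   discard a.pipe(b)                # writes to ``a`` now feed ``b``
  ##   waitFor a.write(@[1.byte, 2, 3])
  ##   let piped = waitFor b.read(3)    # == @[1.byte, 2, 3]
  ##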
if s.isPiped:
raise newAlreadyPipedError()
s.isPiped = true
let oldHandler = target.writeHandler
proc handler(data: seq[byte]) {.async, closure.} =
if not isNil(oldHandler):
await oldHandler(data)
# if we're piping to self,
# then add the data to the
# buffer directly and fire
# the read event
if s == target:
for b in data:
s.readBuf.addLast(b)
# notify main loop of available
# data
s.dataReadEvent.fire()
else:
await target.pushTo(data)
s.writeHandler = handler
result = target
proc `|`*(s: BufferStream, target: BufferStream): BufferStream =
## pipe operator to make piping less verbose
pipe(s, target)
method close*(s: BufferStream) {.async.} =
## close the stream and clear the buffer
for r in s.readReqs:
if not(isNil(r)) and not(r.finished()):
r.cancel()
s.dataReadEvent.fire()
s.readBuf.clear()
s.closeEvent.fire()
s.isClosed = true