## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

## This module implements an asynchronous buffer stream
## which emulates physical async IO.
##
## The stream is based on the standard library's `Deque`,
## which is itself based on a ring buffer.
##
## It works by exposing a regular LPStream interface and
## a method ``pushTo`` to push data to the internal read
## buffer, as well as a handler that can be registered
## which gets triggered on every write to the stream. This
## allows using the buffered stream as a sort of proxy,
## which can be consumed as a regular LPStream but allows
## injecting data for reads and intercepting writes.
##
## Another notable feature is that the stream is fully
## ordered and asynchronous. Reads are queued up in order
## and are suspended when not enough data is available. This
## allows preserving backpressure while maintaining full
## asynchrony. Both writing to the internal buffer with
## ``pushTo`` and reading with the ``read*`` methods
## will suspend until either the number of elements in the
## buffer goes below ``maxSize`` or more data becomes available.
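##
## A minimal usage sketch (the handler body and byte values are
## illustrative only):
##
## .. code-block:: nim
##   proc intercept(data: seq[byte]) {.async.} =
##     echo "intercepted a write of ", data.len, " bytes"
##
##   proc demo() {.async.} =
##     let stream = newBufferStream(intercept)
##     await stream.pushTo(@[1.byte, 2, 3]) # inject data for reads
##     let data = await stream.read(3)      # consume the injected bytes
##     await stream.write(@[4.byte])        # routed to ``intercept``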

import deques, math, oids
import chronos, chronicles, metrics
import ../stream/lpstream

const
  BufferStreamTrackerName* = "libp2p.bufferstream"
  DefaultBufferSize* = 1024

type
  # TODO: figure out how to make this generic to avoid casts
  WriteHandler* = proc (data: seq[byte]): Future[void] {.gcsafe.}

  BufferStream* = ref object of LPStream
    maxSize*: int                 # buffer's max size in bytes
    readBuf: Deque[byte]          # Deque is backed by a ring buffer, making it a good fit as the backing store
    readReqs: Deque[Future[void]] # queue of pending reads, completed in FIFO order
    dataReadEvent: AsyncEvent
    writeHandler*: WriteHandler
    lock: AsyncLock
    isPiped: bool

  AlreadyPipedError* = object of CatchableError
  NotWritableError* = object of CatchableError

  BufferStreamTracker* = ref object of TrackerBase
    opened*: uint64
    closed*: uint64

proc setupBufferStreamTracker(): BufferStreamTracker {.gcsafe.}

proc getBufferStreamTracker(): BufferStreamTracker {.gcsafe.} =
  result = cast[BufferStreamTracker](getTracker(BufferStreamTrackerName))
  if isNil(result):
    result = setupBufferStreamTracker()

proc dumpTracking(): string {.gcsafe.} =
  var tracker = getBufferStreamTracker()
  result = "Opened buffers: " & $tracker.opened & "\n" &
    "Closed buffers: " & $tracker.closed

proc leakTransport(): bool {.gcsafe.} =
  var tracker = getBufferStreamTracker()
  result = (tracker.opened != tracker.closed)

proc setupBufferStreamTracker(): BufferStreamTracker =
  result = new BufferStreamTracker
  result.opened = 0
  result.closed = 0
  result.dump = dumpTracking
  result.isLeaked = leakTransport
  addTracker(BufferStreamTrackerName, result)

declareGauge libp2p_open_bufferstream, "open BufferStream instances"

proc newAlreadyPipedError*(): ref Exception {.inline.} =
  result = newException(AlreadyPipedError, "stream already piped")

proc newNotWritableError*(): ref Exception {.inline.} =
  result = newException(NotWritableError, "stream is not writable")

proc requestReadBytes(s: BufferStream): Future[void] =
  ## create a future that will complete when more
  ## data becomes available in the read buffer
  result = newFuture[void]()
  s.readReqs.addLast(result)
  trace "requestReadBytes(): added a future to readReqs"

proc initBufferStream*(s: BufferStream,
                       handler: WriteHandler = nil,
                       size: int = DefaultBufferSize) =
  s.maxSize = if isPowerOfTwo(size): size else: nextPowerOfTwo(size)
  s.readBuf = initDeque[byte](s.maxSize)
  s.readReqs = initDeque[Future[void]]()
  s.dataReadEvent = newAsyncEvent()
  s.lock = newAsyncLock()
  s.writeHandler = handler
  s.closeEvent = newAsyncEvent()
  inc getBufferStreamTracker().opened
  when chronicles.enabledLogLevel == LogLevel.TRACE:
    s.oid = genOid()
  s.isClosed = false
  libp2p_open_bufferstream.inc()

proc newBufferStream*(handler: WriteHandler = nil,
                      size: int = DefaultBufferSize): BufferStream =
  new result
  result.initBufferStream(handler, size)

proc popFirst*(s: BufferStream): byte =
  result = s.readBuf.popFirst()
  s.dataReadEvent.fire()

proc popLast*(s: BufferStream): byte =
  result = s.readBuf.popLast()
  s.dataReadEvent.fire()

proc shrink(s: BufferStream, fromFirst = 0, fromLast = 0) =
  s.readBuf.shrink(fromFirst, fromLast)
  s.dataReadEvent.fire()

proc len*(s: BufferStream): int = s.readBuf.len

proc pushTo*(s: BufferStream, data: seq[byte]) {.async.} =
  ## Write bytes to the internal read buffer; use this to fill
  ## the buffer with data.
  ##
  ## This method is async and will wait until all data has been
  ## written to the internal buffer; this is done so that backpressure
  ## is preserved.
  ##
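  ## A sketch of the backpressure behavior (buffer size and byte
  ## values are illustrative): with ``size = 2``, pushing four bytes
  ## suspends once the buffer is full and resumes when a reader
  ## drains it.
  ##
  ## .. code-block:: nim
  ##   proc demo() {.async.} =
  ##     let stream = newBufferStream(size = 2)
  ##     let fut = stream.pushTo(@[1.byte, 2, 3, 4]) # suspends: buffer full
  ##     discard await stream.read(2)                # drain two bytes
  ##     await fut                                   # push now completes
  ##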
  when chronicles.enabledLogLevel == LogLevel.TRACE:
    logScope:
      stream_oid = $s.oid

  try:
    await s.lock.acquire()
    var index = 0
    while not s.closed():
      while index < data.len and s.readBuf.len < s.maxSize:
        s.readBuf.addLast(data[index])
        inc(index)
      trace "pushTo()", msg = "added " & $index & " bytes to readBuf"

      # resolve the next queued read request
      if s.readReqs.len > 0:
        s.readReqs.popFirst().complete()
        trace "pushTo(): completed a readReqs future"

      if index >= data.len:
        return

      # if we couldn't transfer all the data to the
      # internal buffer, wait on a read event
      await s.dataReadEvent.wait()
      s.dataReadEvent.clear()
  finally:
    s.lock.release()

method read*(s: BufferStream, n = -1): Future[seq[byte]] {.async.} =
  ## Read all bytes (``n <= 0``) or exactly ``n`` bytes from the buffer.
  ##
  ## This procedure allocates a new ``seq[byte]`` and returns it as the result.
  ##
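  ## For example (a sketch; the requested sizes are illustrative):
  ##
  ## .. code-block:: nim
  ##   proc demo(s: BufferStream) {.async.} =
  ##     let buffered = await s.read()  # everything currently buffered
  ##     let four = await s.read(4)     # exactly four bytes, may suspend
  ##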
  when chronicles.enabledLogLevel == LogLevel.TRACE:
    logScope:
      stream_oid = $s.oid

  trace "read()", requested_bytes = n
  var size = if n > 0: n else: s.readBuf.len()
  var index = 0

  if s.readBuf.len() == 0:
    await s.requestReadBytes()

  while index < size:
    while s.readBuf.len() > 0 and index < size:
      result.add(s.popFirst())
      inc(index)
    trace "read()", read_bytes = index

    if index < size:
      await s.requestReadBytes()

method readExactly*(s: BufferStream,
                    pbytes: pointer,
                    nbytes: int):
                    Future[void] {.async.} =
  ## Read exactly ``nbytes`` bytes from stream ``s`` and store
  ## them at ``pbytes``.
  ##
  ## If EOF is received and ``nbytes`` is not yet read, the procedure
  ## will raise ``LPStreamIncompleteError``.
  ##
  when chronicles.enabledLogLevel == LogLevel.TRACE:
    logScope:
      stream_oid = $s.oid

  var buff: seq[byte]
  try:
    buff = await s.read(nbytes)
  except LPStreamEOFError as exc:
    trace "Exception occurred", exc = exc.msg

  if nbytes > buff.len():
    raise newLPStreamIncompleteError()

  copyMem(pbytes, addr buff[0], nbytes)

method readLine*(s: BufferStream,
                 limit = 0,
                 sep = "\r\n"):
                 Future[string] {.async.} =
  ## Read one line from stream ``s``, where a "line" is a
  ## sequence of bytes ending with ``sep`` (default is ``"\r\n"``).
  ##
  ## If EOF is received and ``sep`` was not found, the method will return
  ## the partially read bytes.
  ##
  ## If EOF is received and the internal buffer is empty, an empty string
  ## is returned.
  ##
  ## If ``limit`` is greater than 0, the result string will be limited to
  ## ``limit`` bytes.
  ##
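  ## For example (a sketch; the pushed bytes are illustrative):
  ##
  ## .. code-block:: nim
  ##   proc demo(s: BufferStream) {.async.} =
  ##     await s.pushTo(cast[seq[byte]]("hello\r\nworld"))
  ##     let line = await s.readLine()  # "hello"; "world" stays buffered
  ##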
  result = ""
  var lim = if limit <= 0: -1 else: limit
  var state = 0
  var index = 0

  while index < s.readBuf.len:
    let ch = char(s.readBuf[index])
    if sep[state] == ch:
      inc(state)
      if state == len(sep):
        s.shrink(index + 1)
        break
    else:
      state = 0
      result.add(ch)
      if len(result) == lim:
        s.shrink(index + 1)
        break
    inc(index)

method readOnce*(s: BufferStream,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int] {.async.} =
  ## Perform one read operation on stream ``s``.
  ##
  ## If the internal buffer is not empty, up to ``nbytes`` bytes will be
  ## transferred from it; otherwise the call waits until some bytes are
  ## received.
  ##
  if s.readBuf.len == 0:
    await s.requestReadBytes()

  var len = if nbytes > s.readBuf.len: s.readBuf.len else: nbytes
  await s.readExactly(pbytes, len)
  result = len

method readUntil*(s: BufferStream,
                  pbytes: pointer,
                  nbytes: int,
                  sep: seq[byte]):
                  Future[int] {.async.} =
  ## Read data from stream ``s`` until the separator ``sep`` is found.
  ##
  ## On success, the data and separator will be removed from the internal
  ## buffer (consumed). Returned data will include the separator at the end.
  ##
  ## If EOF is received and ``sep`` was not found, the procedure will raise
  ## ``LPStreamIncompleteError``.
  ##
  ## If ``nbytes`` bytes have been received and ``sep`` was not found, the
  ## procedure will raise ``LPStreamLimitError``.
  ##
  ## The procedure returns the actual number of bytes read.
  ##
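  ## For example (a sketch; the buffer contents are illustrative):
  ##
  ## .. code-block:: nim
  ##   proc demo(s: BufferStream) {.async.} =
  ##     await s.pushTo(@[1.byte, 2, 0xff, 3])
  ##     var dest: array[8, byte]
  ##     let n = await s.readUntil(addr dest[0], dest.len, @[0xff.byte])
  ##     # n == 3; dest holds [1, 2, 0xff]; the byte 3 stays buffered
  ##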
  var
    dest = cast[ptr UncheckedArray[byte]](pbytes)
    state = 0
    k = 0

  let datalen = s.readBuf.len()
  if datalen == 0:
    raise newLPStreamIncompleteError()

  var index = 0
  while index < datalen:
    let ch = s.readBuf[index]
    if sep[state] == ch:
      inc(state)
    else:
      state = 0
    if k < nbytes:
      dest[k] = ch
      inc(k)
    else:
      raise newLPStreamLimitError()
    if state == len(sep):
      break
    inc(index)

  if state == len(sep):
    s.shrink(index + 1)
    result = k
  else:
    s.shrink(datalen)

method write*(s: BufferStream,
              pbytes: pointer,
              nbytes: int): Future[void] =
  ## Write ``nbytes`` bytes from the buffer at ``pbytes`` to stream ``s``,
  ## passing them to the registered ``writeHandler``.
  ##
  ## If no ``writeHandler`` is set, the returned future fails with
  ## ``NotWritableError``.
  ##
  if isNil(s.writeHandler):
    var retFuture = newFuture[void]("BufferStream.write(pointer)")
    retFuture.fail(newNotWritableError())
    return retFuture

  var buf: seq[byte] = newSeq[byte](nbytes)
  copyMem(addr buf[0], pbytes, nbytes)
  result = s.writeHandler(buf)

method write*(s: BufferStream,
              msg: string,
              msglen = -1): Future[void] =
  ## Write the string ``msg`` of length ``msglen`` to stream ``s``.
  ##
  ## The string ``msg`` must not be zero-length.
  ##
  ## If ``msglen < 0``, the whole string ``msg`` will be written to the
  ## stream. If ``msglen > len(msg)``, only ``len(msg)`` bytes will be
  ## written to the stream.
  ##
  if isNil(s.writeHandler):
    var retFuture = newFuture[void]("BufferStream.write(string)")
    retFuture.fail(newNotWritableError())
    return retFuture

  var buf = ""
  shallowCopy(buf, if msglen > 0: msg[0..<msglen] else: msg)
  result = s.writeHandler(cast[seq[byte]](buf))

method write*(s: BufferStream,
              msg: seq[byte],
              msglen = -1): Future[void] =
  ## Write the sequence of bytes ``msg`` of length ``msglen`` to
  ## stream ``s``.
  ##
  ## The sequence ``msg`` must not be zero-length.
  ##
  ## If ``msglen < 0``, the whole sequence ``msg`` will be written to the
  ## stream. If ``msglen > len(msg)``, only ``len(msg)`` bytes will be
  ## written to the stream.
  ##
  if isNil(s.writeHandler):
    var retFuture = newFuture[void]("BufferStream.write(seq)")
    retFuture.fail(newNotWritableError())
    return retFuture

  var buf: seq[byte]
  shallowCopy(buf, if msglen > 0: msg[0..<msglen] else: msg)
  result = s.writeHandler(buf)

proc pipe*(s: BufferStream,
           target: BufferStream): BufferStream =
  ## pipe the write end of this stream to
  ## be the source of the target stream
  ##
  ## Note that this only works with the LPStream
  ## interface: only the ``read*`` and ``write``
  ## methods are piped.
  ##
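  ## A sketch of piping two streams (stream names and bytes are
  ## illustrative):
  ##
  ## .. code-block:: nim
  ##   proc demo() {.async.} =
  ##     let a = newBufferStream()
  ##     let b = newBufferStream()
  ##     discard a.pipe(b)            # writes to ``a`` feed ``b``'s read buffer
  ##     await a.write(@[1.byte, 2])
  ##     let data = await b.read(2)   # == @[1.byte, 2]
  ##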
  if s.isPiped:
    raise newAlreadyPipedError()

  s.isPiped = true
  let oldHandler = target.writeHandler
  proc handler(data: seq[byte]) {.async, closure, gcsafe.} =
    if not isNil(oldHandler):
      await oldHandler(data)

    # if we're piping to self,
    # then add the data to the
    # buffer directly and fire
    # the read event
    if s == target:
      for b in data:
        s.readBuf.addLast(b)

      # notify main loop of available
      # data
      s.dataReadEvent.fire()
    else:
      await target.pushTo(data)

  s.writeHandler = handler
  result = target

proc `|`*(s: BufferStream, target: BufferStream): BufferStream =
  ## pipe operator to make piping less verbose
  pipe(s, target)

method close*(s: BufferStream) {.async.} =
  ## close the stream and clear the buffer
  if not s.isClosed:
    trace "closing bufferstream"
    for r in s.readReqs:
      if not(isNil(r)) and not(r.finished()):
        r.fail(newLPStreamEOFError())
    s.dataReadEvent.fire()
    s.readBuf.clear()
    s.closeEvent.fire()
    s.isClosed = true
    inc getBufferStreamTracker().closed
    libp2p_open_bufferstream.dec()
  else:
    trace "attempt to close an already closed bufferstream", trace = getStackTrace()
|