Mirror of https://github.com/vacp2p/nim-libp2p.git, synced 2025-01-09 16:25:47 +00:00
96d4c44fec
This change modifies how the backpressure algorithm in bufferstream works - in particular, instead of working byte-by-byte, it now works seq-by-seq.

When data arrives, it usually does so in packets - in the current bufferstream, the packet is read and then split into bytes, which are fed one by one to the bufferstream. On the reading side, the bytes are popped off the bufferstream, again byte by byte, to satisfy `readOnce` requests - this introduces a lot of synchronization traffic because the checks for a full buffer and for async event handling must be done for every byte. In this PR, a queue of length 1 is used instead - this means that at most one "packet" will exist in `pushTo`, one in the queue, and one in the slush buffer that is used to store incomplete reads.

* avoid byte-by-byte copy to buffer, with synchronization in-between
* reuse AsyncQueue synchronization logic instead of rolling our own
* avoid writeHandler callback - implement `write` method instead
* simplify EOF signalling by only setting EOF flag in queue reader (and reset)
* remove BufferStream pipes (unused)
* fix drainBuffer deadlock when drain is called from within the read loop and thus blocks draining
* fix lpchannel init order
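For illustration, here is a minimal sketch of the queue-based backpressure described above, using chronos's AsyncQueue with a maximum size of 1 (the same primitive the reworked bufferstream uses). The `producer`, `consumer` and `main` procs are hypothetical and not part of this change; they only show how `addLast` suspends the pusher until the previous packet has been popped.

import chronos

proc producer(queue: AsyncQueue[seq[byte]]) {.async.} =
  for i in 0 ..< 3:
    let packet = @[byte(i), byte(i), byte(i)]
    # With maxsize = 1, addLast suspends while a packet is still queued,
    # so the producer is paced packet-by-packet by the consumer.
    await queue.addLast(packet)
    echo "pushed packet ", i

proc consumer(queue: AsyncQueue[seq[byte]]) {.async.} =
  for i in 0 ..< 3:
    # popFirst wakes a suspended producer once space frees up.
    let packet = await queue.popFirst()
    echo "popped packet of len ", packet.len

proc main() {.async.} =
  let queue = newAsyncQueue[seq[byte]](1)
  await allFutures(producer(queue), consumer(queue))

waitFor main()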
181 lines
5.5 KiB
Nim
## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
##  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
##  * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.

## This module implements an asynchronous buffer stream
## which emulates physical async IO.
##
## The stream is based on the standard library's `Deque`,
## which is itself based on a ring buffer.
##
## It works by exposing a regular LPStream interface and
## a method ``pushTo`` to push data to the internal read
## buffer; as well as a handler that can be registered
## that gets triggered on every write to the stream. This
## allows using the buffered stream as a sort of proxy,
## which can be consumed as a regular LPStream but allows
## injecting data for reads and intercepting writes.
##
## Another notable feature is that the stream is fully
## ordered and asynchronous. Reads are queued up in order
## and are suspended when not enough data is available. This
## allows preserving backpressure while maintaining full
## asynchrony. Both writing to the internal buffer with
## ``pushTo`` and reading with ``read*`` methods will
## suspend until either the number of elements in the
## buffer goes below ``maxSize`` or more data becomes available.

import std/strformat
import stew/byteutils
import chronos, chronicles, metrics
import ../stream/connection
import ./streamseq

when chronicles.enabledLogLevel == LogLevel.TRACE:
  import oids

export connection

logScope:
  topics = "bufferstream"

const
  BufferStreamTrackerName* = "libp2p.bufferstream"

type
  BufferStreamTracker* = ref object of TrackerBase
    opened*: uint64
    closed*: uint64

proc setupBufferStreamTracker(): BufferStreamTracker {.gcsafe.}

proc getBufferStreamTracker(): BufferStreamTracker {.gcsafe.} =
  result = cast[BufferStreamTracker](getTracker(BufferStreamTrackerName))
  if isNil(result):
    result = setupBufferStreamTracker()

proc dumpTracking(): string {.gcsafe.} =
  var tracker = getBufferStreamTracker()
  result = "Opened buffers: " & $tracker.opened & "\n" &
    "Closed buffers: " & $tracker.closed

proc leakTransport(): bool {.gcsafe.} =
  var tracker = getBufferStreamTracker()
  result = (tracker.opened != tracker.closed)

proc setupBufferStreamTracker(): BufferStreamTracker =
  result = new BufferStreamTracker
  result.opened = 0
  result.closed = 0
  result.dump = dumpTracking
  result.isLeaked = leakTransport
  addTracker(BufferStreamTrackerName, result)

type
  BufferStream* = ref object of Connection
    readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
    readBuf*: StreamSeq # overflow buffer for readOnce

func shortLog*(s: BufferStream): auto =
  if s.isNil: "BufferStream(nil)"
  elif s.peerInfo.isNil: $s.oid
  else: &"{shortLog(s.peerInfo.peerId)}:{s.oid}"
chronicles.formatIt(BufferStream): shortLog(it)

proc len*(s: BufferStream): int =
  s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len() else: 0)

method initStream*(s: BufferStream) =
  if s.objName.len == 0:
    s.objName = "BufferStream"

  procCall Connection(s).initStream()

  s.readQueue = newAsyncQueue[seq[byte]](1)

  trace "BufferStream created", s
  inc getBufferStreamTracker().opened

proc newBufferStream*(timeout: Duration = DefaultConnectionTimeout): BufferStream =
  new result
  result.timeout = timeout
  result.initStream()

method pushTo*(s: BufferStream, data: seq[byte]) {.base, async.} =
  ## Write bytes to internal read buffer, use this to fill up the
  ## buffer with data.
  ##
  ## `pushTo` will block if the queue is full, thus maintaining backpressure.
  ##

  if s.isClosed:
    raise newLPStreamEOFError()

  if data.len == 0:
    return # Don't push 0-length buffers, these signal EOF

  # We will block here if there is already data queued, until it has been
  # processed
  trace "Pushing readQueue", s, len = data.len
  await s.readQueue.addLast(data)

method readOnce*(s: BufferStream,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int] {.async.} =
  if s.isEof and s.readBuf.len() == 0:
    raise newLPStreamEOFError()

  var
    p = cast[ptr UncheckedArray[byte]](pbytes)

  # First consume leftovers from previous read
  var rbytes = s.readBuf.consumeTo(toOpenArray(p, 0, nbytes - 1))

  if rbytes < nbytes:
    # There's space in the buffer - consume some data from the read queue
    trace "popping readQueue", s, rbytes, nbytes
    let buf = await s.readQueue.popFirst()

    if buf.len == 0:
      # No more data will arrive on read queue
      s.isEof = true
    else:
      let remaining = min(buf.len, nbytes - rbytes)
      toOpenArray(p, rbytes, nbytes - 1)[0..<remaining] =
        buf.toOpenArray(0, remaining - 1)
      rbytes += remaining

      if remaining < buf.len:
        trace "add leftovers", s, len = buf.len - remaining
        s.readBuf.add(buf.toOpenArray(remaining, buf.high))

  if s.isEof and s.readBuf.len() == 0:
    # We can clear the readBuf memory since it won't be used any more
    s.readBuf = StreamSeq()

  s.activity = true

  return rbytes

method close*(s: BufferStream) {.async, gcsafe.} =
  ## close the stream and clear the buffer
  if s.isClosed:
    trace "Already closed", s
    return

  trace "Closing BufferStream", s

  # Push empty block to signal close, but don't block
  asyncSpawn s.readQueue.addLast(@[])

  await procCall Connection(s).close() # noraises, nocancels

  inc getBufferStreamTracker().closed
  trace "Closed BufferStream", s
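As a rough, hypothetical usage sketch (not part of the repository) of the `pushTo`/`readOnce` interaction documented above: one packet is pushed, then drained in two small `readOnce` calls, the second of which is served from the leftover `readBuf` without touching the queue. The import path is a guess and may need adjusting to the module's actual location.

import chronos
import libp2p/stream/bufferstream # illustrative path; adjust as needed

proc demo() {.async.} =
  let stream = newBufferStream()

  # Producer side: push one 4-byte packet into the read queue.
  await stream.pushTo(@[1'u8, 2, 3, 4])

  # Consumer side: drain it in 2-byte readOnce calls.
  var buf: array[2, byte]
  var n = await stream.readOnce(addr buf[0], buf.len)
  echo "first read: ", n, " bytes"  # pops the packet, keeps 2 leftover bytes in readBuf

  n = await stream.readOnce(addr buf[0], buf.len)
  echo "second read: ", n, " bytes" # served from readBuf, queue not touched

  await stream.close()

waitFor demo()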