# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
import ./coder,
       ../muxer,
       ../../stream/[bufferstream, connection, streamseq],
       ../../peerinfo

export connection

logScope:
  topics = "libp2p mplexchannel"

when defined(libp2p_mplex_metrics):
  declareHistogram libp2p_mplex_qlen, "message queue length",
    buckets = [0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]
  declareCounter libp2p_mplex_qlenclose, "closed because of max queuelen"
  declareHistogram libp2p_mplex_qtime, "message queuing time"

when defined(libp2p_network_protocols_metrics):
  declareCounter libp2p_protocols_bytes,
    "total sent or received bytes", ["protocol", "direction"]

## Channel half-closed states
##
## | State | Closed local    | Closed remote |
## |=======|=================|===============|
## | Read  | Yes (until EOF) | No            |
## | Write | No              | Yes           |
##
## Channels are considered fully closed when both outgoing and incoming
## directions are closed and when the reader of the channel has read the
## EOF marker
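##
## Hedged usage sketch - illustrative only, assuming `chan` is one end of an
## established channel and `buf` is a preallocated `seq[byte]`:
##
## .. code-block:: nim
##   await chan.close()   # half-close: no further local writes are possible
##   doAssert chan.closedLocal
##   # reading is still allowed until the remote side closes and EOF is seen
##   while (await chan.readOnce(addr buf[0], buf.len)) > 0:
##     discard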

const
  MaxWrites = 1024 ##\
    ## Maximum number of in-flight writes - after this, we disconnect the peer

  LPChannelTrackerName* = "LPChannel"
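
# Illustrative only (hypothetical `chan` and `data`): a writer that never
# awaits its writes will eventually hit MaxWrites, at which point the channel
# is reset and the underlying connection closed:
#
#   for i in 0 .. MaxWrites:
#     discard chan.write(data) # unawaited writes accumulate in `writes` below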

type
  LPChannel* = ref object of BufferStream
    id*: uint64 # channel id
    name*: string # name of the channel (for debugging)
    conn*: Connection # wrapped connection used for writing
    initiator*: bool # true if the channel was initiated locally
    isOpen*: bool # has channel been opened
    closedLocal*: bool # has channel been closed locally
    remoteReset*: bool # has channel been remotely reset
    localReset*: bool # has channel been reset locally
    msgCode*: MessageType # cached in/out message code
    closeCode*: MessageType # cached in/out close code
    resetCode*: MessageType # cached in/out reset code
    writes*: int # in-flight writes

func shortLog*(s: LPChannel): auto =
  try:
    if s == nil: "LPChannel(nil)"
    elif s.name != $s.oid and s.name.len > 0:
      &"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
    else: &"{shortLog(s.conn.peerId)}:{s.oid}"
  except ValueError as exc:
    raiseAssert(exc.msg)

chronicles.formatIt(LPChannel): shortLog(it)

proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
  trace "Opening channel", s, conn = s.conn
  if s.conn.isClosed:
    return
  try:
    await s.conn.writeMsg(s.id, MessageType.New, s.name)
    s.isOpen = true
  except CancelledError as exc:
    raise exc
  except LPStreamError as exc:
    await s.conn.close()
    raise exc

method closed*(s: LPChannel): bool =
  s.closedLocal

proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
  ## Channels may be closed for reading and writing in any order - we'll close
  ## the underlying bufferstream when both directions are closed
  if s.closedLocal and s.atEof():
    await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async: (raises: []).} =
  if s.isClosed:
    trace "Already closed", s
    return

  s.isClosed = true
  s.closedLocal = true
  s.localReset = not s.remoteReset

  trace "Resetting channel", s, len = s.len

  if s.isOpen and not s.conn.isClosed:
    # If the connection is still active, notify the other end
    proc resetMessage() {.async: (raises: []).} =
      try:
        trace "sending reset message", s, conn = s.conn
        await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
      except LPStreamError as exc:
        trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
        await s.conn.close()

    asyncSpawn resetMessage()

  await s.closeImpl()

  trace "Channel reset", s

method close*(s: LPChannel) {.async: (raises: []).} =
  ## Close channel for writing - a message will be sent to the other peer
  ## informing them that the channel is closed and that we're waiting for
  ## their acknowledgement.
  if s.closedLocal:
    trace "Already closed", s
    return
  s.closedLocal = true

  trace "Closing channel", s, conn = s.conn, len = s.len

  if s.isOpen and not s.conn.isClosed:
    try:
      await s.conn.writeMsg(s.id, s.closeCode) # write close
    except CancelledError:
      await s.conn.close()
    except LPStreamError as exc:
      # It's harmless that close message cannot be sent - the connection is
      # likely down already
      await s.conn.close()
      trace "Cannot send close message", s, id = s.id, msg = exc.msg

  await s.closeUnderlying() # maybe already eofed

  trace "Closed channel", s, len = s.len

method initStream*(s: LPChannel) =
  if s.objName.len == 0:
    s.objName = LPChannelTrackerName

  s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
    trace "Idle timeout expired, resetting LPChannel", s
    s.reset()

  procCall BufferStream(s).initStream()

method readOnce*(
    s: LPChannel,
    pbytes: pointer,
    nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
  ## Mplex relies on reading being done regularly from every channel, or all
  ## channels are blocked - in particular, this means that reading from one
  ## channel must not be done from within a callback / read handler of another
  ## or the reads will lock each other.
  if s.remoteReset:
    raise newLPStreamResetError()
  if s.localReset:
    raise newLPStreamClosedError()
  if s.atEof():
    raise newLPStreamRemoteClosedError()
  if s.conn.closed:
    raise newLPStreamConnDownError()
  try:
    let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
    when defined(libp2p_network_protocols_metrics):
      if s.protocol.len > 0:
        libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])

    trace "readOnce", s, bytes
    if bytes == 0:
      await s.closeUnderlying()
    return bytes
  except CancelledError as exc:
    await s.reset()
    raise exc
  except LPStreamError as exc:
    # Resetting is necessary because data has been lost in s.readBuf and
    # there's no way to gracefully recover / use the channel any more
    await s.reset()
    raise newLPStreamConnDownError(exc)
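
# Sketch of the read pattern the note above requires - every channel gets its
# own read loop and no loop ever reads from another channel (illustrative
# only; `handle` is a hypothetical callback):
#
#   proc readLoop(chan: LPChannel) {.async.} =
#     var buf = newSeq[byte](4096)
#     while true:
#       let n = await chan.readOnce(addr buf[0], buf.len)
#       if n == 0:
#         break # EOF - the remote side closed and the buffer is drained
#       handle(buf.toOpenArray(0, n - 1))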

proc prepareWrite(
    s: LPChannel,
    msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
  # prepareWrite is the slow path of writing a message - see conditions in
  # write
  if s.remoteReset:
    raise newLPStreamResetError()
  if s.closedLocal:
    raise newLPStreamClosedError()
  if s.conn.closed:
    raise newLPStreamConnDownError()

  if msg.len == 0:
    return

  if s.writes >= MaxWrites:
    debug "Closing connection, too many in-flight writes on channel",
      s, conn = s.conn, writes = s.writes
    when defined(libp2p_mplex_metrics):
      libp2p_mplex_qlenclose.inc()
    await s.reset()
    await s.conn.close()
    return

  if not s.isOpen:
    await s.open()

  await s.conn.writeMsg(s.id, s.msgCode, msg)

proc completeWrite(
    s: LPChannel,
    fut: Future[void].Raising([CancelledError, LPStreamError]),
    msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
  try:
    s.writes += 1

    when defined(libp2p_mplex_metrics):
      libp2p_mplex_qlen.observe(s.writes.int64 - 1)
      libp2p_mplex_qtime.time:
        await fut
    else:
      await fut

    when defined(libp2p_network_protocols_metrics):
      if s.protocol.len > 0:
        # This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
        # https://github.com/status-im/nim-metrics/issues/79
        libp2p_protocols_bytes.inc(
          msgLen.int64, labelValues = [s.protocol, "out"])

    s.activity = true
  except CancelledError as exc:
    # Chronos may still send the data
    raise exc
  except LPStreamConnDownError as exc:
    await s.reset()
    await s.conn.close()
    raise exc
  except LPStreamEOFError as exc:
    raise exc
  except LPStreamError as exc:
    trace "exception in lpchannel write handler", s, msg = exc.msg
    await s.reset()
    await s.conn.close()
    raise newLPStreamConnDownError(exc)
  finally:
    s.writes -= 1

method write*(
    s: LPChannel,
    msg: seq[byte]
): Future[void] {.async: (raises: [
    CancelledError, LPStreamError], raw: true).} =
  ## Write to mplex channel - there may be up to MaxWrites concurrent writes
  ## pending, after which the peer is disconnected

  let
    closed = s.closedLocal or s.conn.closed

  let fut =
    if (not closed) and msg.len > 0 and s.writes < MaxWrites and s.isOpen:
      # Fast path: Avoid a copy of msg being kept in the closure created by
      # `{.async.}` as this drives up memory usage - the conditions are laid out
      # in prepareWrite
      s.conn.writeMsg(s.id, s.msgCode, msg)
    else:
      prepareWrite(s, msg)

  s.completeWrite(fut, msg.len)
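
# Hedged caller-side sketch (illustrative names): `write` returns a future as
# soon as the message is queued on the underlying connection, and up to
# MaxWrites such futures may be in flight before the channel is reset and the
# peer disconnected - so callers should normally await each write:
#
#   for chunk in chunks:
#     await chan.write(chunk) # awaiting keeps `writes` bounded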

method getWrapped*(s: LPChannel): Connection = s.conn

proc init*(
    L: type LPChannel,
    id: uint64,
    conn: Connection,
    initiator: bool,
    name: string = "",
    timeout: Duration = DefaultChanTimeout): LPChannel =
  let chann = L(
    id: id,
    name: name,
    conn: conn,
    initiator: initiator,
    timeout: timeout,
    isOpen: if initiator: false else: true,
    msgCode: if initiator: MessageType.MsgOut else: MessageType.MsgIn,
    closeCode: if initiator: MessageType.CloseOut else: MessageType.CloseIn,
    resetCode: if initiator: MessageType.ResetOut else: MessageType.ResetIn,
    dir: if initiator: Direction.Out else: Direction.In)

  chann.initStream()

  when chronicles.enabledLogLevel == LogLevel.TRACE:
    chann.name = if chann.name.len > 0: chann.name else: $chann.oid

  trace "Created new lpchannel", s = chann, id, initiator

  return chann
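
# Hedged end-to-end sketch - illustrative only; in practice channels are
# created by the mplex muxer itself, and `conn` here stands for an established
# mplex-framed Connection:
#
#   let chan = LPChannel.init(1'u64, conn, initiator = true, name = "ping")
#   await chan.open() # sends MessageType.New along with the channel name
#   await chan.write(@[byte 1, 2, 3])
#   await chan.close() # half-close for writing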