fast path for writes (#659)

Avoids several copies of the various message buffers being kept alive for the lifetime of the future.
Jacek Sieka 2021-12-14 10:55:17 +01:00 committed by GitHub
parent 47a35e26d7
commit c49932b55a
3 changed files with 65 additions and 35 deletions
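
The gist of the change, sketched outside the library: a Nim `{.async.}` proc is rewritten into a closure iterator whose environment holds its parameters, so a `seq[byte]` passed to an async `write` stays referenced (and may be copied) until the returned future completes. A plain proc that simply returns the inner future avoids that extra capture. A minimal chronos sketch, with a hypothetical `sendBytes` standing in for the real transport call:

import chronos

proc sendBytes(data: seq[byte]): Future[void] {.async.} =
  # hypothetical stand-in for the lower-level write (conn.writeMsg,
  # client.write, ...) that returns a future
  await sleepAsync(10.milliseconds)

proc writeSlow(msg: seq[byte]): Future[void] {.async.} =
  # the async transformation captures `msg` in its environment, keeping the
  # buffer alive until the returned future completes
  await sendBytes(msg)

proc writeFast(msg: seq[byte]): Future[void] =
  # no async transformation: the inner future is returned as-is, so this
  # layer keeps no extra copy of `msg`
  sendBytes(msg)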

libp2p/muxers/mplex/lpchannel.nim

@@ -175,9 +175,9 @@ method readOnce*(s: LPChannel,
     await s.reset()
     raise exc
 
-method write*(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
-  ## Write to mplex channel - there may be up to MaxWrite concurrent writes
-  ## pending after which the peer is disconnected
+proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
+  # prepareWrite is the slow path of writing a message - see conditions in
+  # write
   if s.closedLocal or s.conn.closed:
     raise newLPStreamClosedError()
@@ -191,19 +191,20 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
     await s.conn.close()
     return
 
-  s.writes += 1
-  try:
-    if not s.isOpen:
-      await s.open()
-
-    # writes should happen in sequence
-    trace "write msg", s, conn = s.conn, len = msg.len
-
-    await s.conn.writeMsg(s.id, s.msgCode, msg)
+  if not s.isOpen:
+    await s.open()
+
+  await s.conn.writeMsg(s.id, s.msgCode, msg)
+
+proc completeWrite(
+    s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
+  try:
+    s.writes += 1
+
+    await fut
     when defined(libp2p_network_protocols_metrics):
       if s.tag.len > 0:
-        libp2p_protocols_bytes.inc(msg.len.int64, labelValues=[s.tag, "out"])
+        libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.tag, "out"])
 
     s.activity = true
   except CatchableError as exc:
@@ -214,6 +215,24 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
   finally:
     s.writes -= 1
 
+method write*(s: LPChannel, msg: seq[byte]): Future[void] =
+  ## Write to mplex channel - there may be up to MaxWrite concurrent writes
+  ## pending after which the peer is disconnected
+  let
+    closed = s.closedLocal or s.conn.closed
+
+  let fut =
+    if (not closed) and msg.len > 0 and s.writes < MaxWrites and s.isOpen:
+      # Fast path: Avoid a copy of msg being kept in the closure created by
+      # `{.async.}` as this drives up memory usage - the conditions are laid out
+      # in prepareWrite
+      s.conn.writeMsg(s.id, s.msgCode, msg)
+    else:
+      prepareWrite(s, msg)
+
+  s.completeWrite(fut, msg.len)
+
 proc init*(
   L: type LPChannel,
   id: uint64,
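
The same split, reduced to a sketch with made-up names (`transportWrite`, `completeSend`, `send` and `bytesSent` are not the library API): the slow path and the accounting remain `{.async.}`, but the accounting proc only ever receives the pending future and the message length, so its closure captures an int rather than the whole buffer, and the dispatcher itself is a plain proc.

import chronos

var bytesSent: int64  # stands in for the Prometheus counters above

proc transportWrite(msg: seq[byte]): Future[void] {.async.} =
  await sleepAsync(1.milliseconds)  # pretend network write

proc completeSend(fut: Future[void], msgLen: int): Future[void] {.async.} =
  # only the future and an int are captured here - never the buffer
  await fut
  bytesSent += msgLen

proc send(msg: seq[byte]): Future[void] =
  # non-async dispatcher: hand `msg` straight to the transport, then do the
  # bookkeeping against the returned future
  completeSend(transportWrite(msg), msg.len)

In the channel code above, the dispatcher additionally checks `closedLocal`, `msg.len`, `MaxWrites` and `isOpen` before choosing `s.conn.writeMsg` (fast path) or `prepareWrite` (slow path).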

libp2p/protocols/secure/noise.nim

@@ -460,10 +460,8 @@ proc encryptFrame(
   cipherFrame[2 + src.len()..<cipherFrame.len] = tag
 
-method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] {.async.} =
-  if message.len == 0:
-    return
+method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
+  # Fast path: `{.async.}` would introduce a copy of `message`
   const FramingSize = 2 + sizeof(ChaChaPolyTag)
 
   let
@@ -479,10 +477,16 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] {.async.
     let
       chunkSize = min(MaxPlainSize, left)
 
-    encryptFrame(
-      sconn,
-      cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
-      message.toOpenArray(offset, offset + chunkSize - 1))
+    try:
+      encryptFrame(
+        sconn,
+        cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
+        message.toOpenArray(offset, offset + chunkSize - 1))
+    except NoiseNonceMaxError as exc:
+      debug "Noise nonce exceeded"
+      let fut = newFuture[void]("noise.write.nonce")
+      fut.fail(exc)
+      return fut
 
     when defined(libp2p_dump):
       dumpMessage(
@@ -492,9 +496,12 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] {.async.
     left = left - chunkSize
     offset += chunkSize
     woffset += chunkSize + FramingSize
 
   sconn.activity = true
-  await sconn.stream.write(cipherFrames)
+
+  # Write all `cipherFrames` in a single write, to avoid interleaving /
+  # sequencing issues
+  sconn.stream.write(cipherFrames)
 
 method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureConn] {.async.} =
   trace "Starting Noise handshake", conn, initiator

libp2p/stream/chronosstream.nim

@@ -94,7 +94,6 @@ when defined(libp2p_agents_metrics):
 method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.async.} =
   if s.atEof:
     raise newLPStreamEOFError()
-
   withExceptions:
     result = await s.client.readOnce(pbytes, nbytes)
     s.activity = true # reset activity flag
@@ -104,31 +103,36 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
       if s.tracked:
         libp2p_peers_traffic_read.inc(nbytes.int64, labelValues = [s.shortAgent])
 
-method write*(s: ChronosStream, msg: seq[byte]) {.async.} =
-  if s.closed:
-    raise newLPStreamClosedError()
-
-  if msg.len == 0:
-    return
-
+proc completeWrite(
+    s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =
   withExceptions:
     # StreamTransport will only return written < msg.len on fatal failures where
     # further writing is not possible - in such cases, we'll raise here,
     # since we don't return partial writes lengths
-    var written = await s.client.write(msg)
-    if written < msg.len:
+    var written = await fut
+    if written < msgLen:
       raise (ref LPStreamClosedError)(msg: "Write couldn't finish writing")
 
     s.activity = true # reset activity flag
-    libp2p_network_bytes.inc(msg.len.int64, labelValues = ["out"])
+    libp2p_network_bytes.inc(msgLen.int64, labelValues = ["out"])
     when defined(libp2p_agents_metrics):
       s.trackPeerIdentity()
       if s.tracked:
-        libp2p_peers_traffic_write.inc(msg.len.int64, labelValues = [s.shortAgent])
+        libp2p_peers_traffic_write.inc(msgLen.int64, labelValues = [s.shortAgent])
+
+method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
+  # Avoid a copy of msg being kept in the closure created by `{.async.}` as this
+  # drives up memory usage
+  if s.closed:
+    let fut = newFuture[void]("chronosstream.write.closed")
+    fut.fail(newLPStreamClosedError())
+    return fut
+
+  s.completeWrite(s.client.write(msg), msg.len)
 
 method closed*(s: ChronosStream): bool =
-  result = s.client.closed
+  s.client.closed
 
 method atEof*(s: ChronosStream): bool =
   s.client.atEof()
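
Callers should not notice the difference (a usage sketch with hypothetical names, not the test suite): whether a `write` raises before returning or hands back an already-failed future, `await` surfaces the error in the same place.

import chronos

proc flakyWrite(msg: seq[byte]): Future[void] =
  # hypothetical write that fails without ever suspending, the way the
  # non-async writes above fail when the stream is already closed
  result = newFuture[void]("demo.flakyWrite")
  result.fail(newException(CatchableError, "stream closed"))

proc caller() {.async.} =
  try:
    await flakyWrite(@[1'u8, 2, 3])
  except CatchableError as exc:
    # a failed future is re-raised by await, just like a synchronous raise
    echo "write failed: ", exc.msg

waitFor caller()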