simplify mplex (#327)
* less async
* less copying of data
* less redundant cleanup
parent 9c7e055310
commit 397f9edfd4
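The central change in the diff below replaces Mplex's separate `local`/`remote` channel tables (and the `getChannelList` helper) with a single `channels: array[bool, Table[uint64, LPChannel]]` indexed by the initiator flag. A minimal sketch of that idea, outside the actual diff; `Chan`, `Mux`, `register` and `lookup` are illustrative stand-ins, not names from the codebase:

import std/tables

type
  Chan = ref object                     # stand-in for LPChannel
    id: uint64
    initiator: bool

  Mux = object                          # stand-in for Mplex
    # channels[false] holds remotely initiated channels,
    # channels[true] holds locally initiated ones
    channels: array[bool, Table[uint64, Chan]]

proc register(m: var Mux, c: Chan) =
  # one expression replaces the old if/else over two separate tables
  m.channels[c.initiator][c.id] = c

proc lookup(m: Mux, initiator: bool, id: uint64): Chan =
  # nil when unknown, mirroring the getOrDefault(id, nil) used in the new handle loop
  m.channels[initiator].getOrDefault(id, nil)

when isMainModule:
  var m: Mux                            # array and Tables are usable when zero-initialised
  m.register(Chan(id: 1, initiator: true))
  doAssert m.lookup(true, 1) != nil
  doAssert m.lookup(false, 1) == nil

Indexing an array by `bool` keeps both halves in one field, so registration, lookup, metrics and cleanup can all address "the table for this initiator" with a single expression.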
@@ -46,7 +46,8 @@ proc writeMsg*(conn: Connection,
                id: uint64,
                msgType: MessageType,
                data: seq[byte] = @[]) {.async, gcsafe.} =
-  trace "sending data over mplex", id,
+  trace "sending data over mplex", oid = $conn.oid,
+                                   id,
                                    msgType,
                                    data = data.len
   var
@@ -55,15 +56,14 @@ proc writeMsg*(conn: Connection,
   while left > 0 or data.len == 0:
     let
       chunkSize = if left > MaxMsgSize: MaxMsgSize - 64 else: left
-      chunk = if chunkSize > 0 : data[offset..(offset + chunkSize - 1)] else: data

     ## write length prefixed
     var buf = initVBuffer()
     buf.writePBVarint(id shl 3 or ord(msgType).uint64)
-    buf.writePBVarint(chunkSize.uint64) # size should be always sent
+    buf.writeSeq(data.toOpenArray(offset, offset + chunkSize - 1))
     buf.finish()
     left = left - chunkSize
     offset = offset + chunkSize
-    await conn.write(buf.buffer & chunk)
+    await conn.write(buf.buffer)

     if data.len == 0:
       return
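In the new `writeMsg`, the intermediate `chunk` slice and the `buf.buffer & chunk` concatenation (each of which copied the payload) are gone: the chunk is appended length-prefixed into the same buffer via `writeSeq(data.toOpenArray(...))`, so a single buffer is written per chunk. A rough, self-contained sketch of that framing (varint header, varint length, payload), using a hand-rolled `putUvarint`/`encodeFrame` pair for illustration only, not the actual VBuffer API:

# Illustrative framing helpers; assumes the mplex wire layout shown in the diff.
proc putUvarint(buf: var seq[byte], x: uint64) =
  ## append x as an unsigned LEB128 varint
  var v = x
  while v >= 0x80'u64:
    buf.add(byte((v and 0x7f) or 0x80))
    v = v shr 7
  buf.add(byte(v))

proc encodeFrame(id: uint64, msgType: uint64, data: openArray[byte]): seq[byte] =
  ## mplex frame layout: varint(id shl 3 or msgType), varint(len), payload
  result = newSeqOfCap[byte](data.len + 16)
  result.putUvarint(id shl 3 or msgType)
  result.putUvarint(data.len.uint64)
  result.add(data)                       # payload appended once, no extra chunk copy

when isMainModule:
  # id = 7, message code 2, 3 bytes of payload
  let frame = encodeFrame(7, 2, [byte 1, 2, 3])
  doAssert frame == @[byte 0x3a, 0x03, 0x01, 0x02, 0x03]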
@@ -268,7 +268,7 @@ proc init*(
       await chann.open()

     # writes should happen in sequence
-    trace "sending data"
+    trace "sending data", len = data.len

     await conn.writeMsg(chann.id,
                         chann.msgCode,
@@ -34,10 +34,8 @@ type
   TooManyChannels* = object of CatchableError

   Mplex* = ref object of Muxer
-    remote: Table[uint64, LPChannel]
-    local: Table[uint64, LPChannel]
-    currentId*: uint64
-    maxChannels*: uint64
+    channels: array[bool, Table[uint64, LPChannel]]
+    currentId: uint64
     inChannTimeout: Duration
     outChannTimeout: Duration
     isClosed: bool
@@ -47,13 +45,17 @@ type
 proc newTooManyChannels(): ref TooManyChannels =
   newException(TooManyChannels, "max allowed channel count exceeded")

-proc getChannelList(m: Mplex, initiator: bool): var Table[uint64, LPChannel] =
-  if initiator:
-    trace "picking local channels", initiator = initiator, oid = $m.oid
-    result = m.local
-  else:
-    trace "picking remote channels", initiator = initiator, oid = $m.oid
-    result = m.remote
+proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
+  ## remove the local channel from the internal tables
+  ##
+  await chann.join()
+  m.channels[chann.initiator].del(chann.id)
+  trace "cleaned up channel", id = chann.id, oid = $chann.oid
+
+  when defined(libp2p_expensive_metrics):
+    libp2p_mplex_channels.set(
+      m.channels[chann.initiator].len.int64,
+      labelValues = [$chann.initiator, $m.connection.peerInfo])

 proc newStreamInternal*(m: Mplex,
                         initiator: bool = true,
@@ -61,7 +63,7 @@ proc newStreamInternal*(m: Mplex,
                         name: string = "",
                         lazy: bool = false,
                         timeout: Duration):
-                        Future[LPChannel] {.async, gcsafe.} =
+                        LPChannel {.gcsafe.} =
   ## create new channel/stream
   ##
   let id = if initiator:
@@ -83,29 +85,17 @@ proc newStreamInternal*(m: Mplex,
   result.peerInfo = m.connection.peerInfo
   result.observedAddr = m.connection.observedAddr

-  doAssert(id notin m.getChannelList(initiator),
+  doAssert(id notin m.channels[initiator],
     "channel slot already taken!")

-  m.getChannelList(initiator)[id] = result
-  when defined(libp2p_expensive_metrics):
-    libp2p_mplex_channels.set(
-      m.getChannelList(initiator).len.int64,
-      labelValues = [$initiator,
-                     $m.connection.peerInfo])
-
-proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
-  ## remove the local channel from the internal tables
-  ##
-  await chann.join()
-  if not isNil(chann):
-    m.getChannelList(chann.initiator).del(chann.id)
-    trace "cleaned up channel", id = chann.id
-
-    when defined(libp2p_expensive_metrics):
-      libp2p_mplex_channels.set(
-        m.getChannelList(chann.initiator).len.int64,
-        labelValues = [$chann.initiator,
-                       $m.connection.peerInfo])
+  m.channels[initiator][id] = result
+  asyncCheck m.cleanupChann(result)
+
+  when defined(libp2p_expensive_metrics):
+    libp2p_mplex_channels.set(
+      m.channels[initiator].len.int64,
+      labelValues = [$initiator, $m.connection.peerInfo])

 proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
   ## call the muxer stream handler for this channel
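The cleanup story changes with this hunk: `cleanupChann` now waits on `chann.join()` and removes the channel from the table only once the channel itself is finished, and it is registered a single time at creation (`asyncCheck m.cleanupChann(result)` above) rather than being called at every close/reset site. A small sketch of that register-once pattern using chronos; `ChanT`, `doneFut`, `newChan` and `cleanup` are illustrative names, not the library's API:

import chronos
import std/tables

type ChanT = ref object                  # illustrative stand-in for LPChannel
  id: uint64
  doneFut: Future[void]                  # completed when the channel is done

proc join(c: ChanT): Future[void] = c.doneFut

var channels: Table[uint64, ChanT]

proc cleanup(c: ChanT) {.async.} =
  await c.join()                         # wait for the channel to finish...
  channels.del(c.id)                     # ...then drop it from the table, exactly once

proc newChan(id: uint64): ChanT =
  result = ChanT(id: id, doneFut: newFuture[void]())
  channels[id] = result
  asyncCheck cleanup(result)             # cleanup registered at creation time

when isMainModule:
  let c = newChan(1)
  c.doneFut.complete()                   # simulate the channel finishing
  waitFor sleepAsync(10.milliseconds)    # give the cleanup task a chance to run
  doAssert 1'u64 notin channels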
@@ -121,99 +111,75 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
   except CatchableError as exc:
     trace "exception in stream handler", exc = exc.msg
     await chann.reset()
-    await m.cleanupChann(chann)

 method handle*(m: Mplex) {.async, gcsafe.} =
-  trace "starting mplex main loop", oid = $m.oid
+  logScope: moid = $m.oid
+
+  trace "starting mplex main loop"
   try:
     defer:
-      trace "stopping mplex main loop", oid = $m.oid
+      trace "stopping mplex main loop"
       await m.close()

     while not m.connection.atEof:
-      trace "waiting for data", oid = $m.oid
-      let (id, msgType, data) = await m.connection.readMsg()
-      trace "read message from connection", id = id,
-                                            msgType = msgType,
-                                            data = data.shortLog,
-                                            oid = $m.oid
-
-      let initiator = bool(ord(msgType) and 1)
-      var channel: LPChannel
-      if MessageType(msgType) != MessageType.New:
-        let channels = m.getChannelList(initiator)
-        if id notin channels:
-          trace "Channel not found, skipping", id = id,
-                                               initiator = initiator,
-                                               msg = msgType,
-                                               oid = $m.oid
-          continue
-        channel = channels[id]
+      trace "waiting for data"
+      let
+        (id, msgType, data) = await m.connection.readMsg()
+        initiator = bool(ord(msgType) and 1)

       logScope:
         id = id
         initiator = initiator
         msgType = msgType
         size = data.len
-        muxer_oid = $m.oid

-      case msgType:
-        of MessageType.New:
-          let name = string.fromBytes(data)
-          if m.getChannelList(false).len > m.maxChannCount - 1:
+      trace "read message from connection", data = data.shortLog
+
+      var channel =
+        if MessageType(msgType) != MessageType.New:
+          let tmp = m.channels[initiator].getOrDefault(id, nil)
+          if tmp == nil:
+            trace "Channel not found, skipping"
+            continue
+
+          tmp
+        else:
+          if m.channels[false].len > m.maxChannCount - 1:
             warn "too many channels created by remote peer", allowedMax = MaxChannelCount
             raise newTooManyChannels()

-          channel = await m.newStreamInternal(
-            false,
-            id,
-            name,
-            timeout = m.outChannTimeout)
-
-          trace "created channel", name = channel.name,
-                                   oid = $channel.oid
+          let name = string.fromBytes(data)
+          m.newStreamInternal(false, id, name, timeout = m.outChannTimeout)
+
+      logScope:
+        name = channel.name
+        oid = $channel.oid
+
+      case msgType:
+        of MessageType.New:
+          trace "created channel"

           if not isNil(m.streamHandler):
             # launch handler task
             asyncCheck m.handleStream(channel)

         of MessageType.MsgIn, MessageType.MsgOut:
-          logScope:
-            name = channel.name
-            oid = $channel.oid
-
-          trace "pushing data to channel"
-
           if data.len > MaxMsgSize:
-            warn "attempting to send a packet larger than allowed", allowed = MaxMsgSize,
-                                                                    sending = data.len
+            warn "attempting to send a packet larger than allowed", allowed = MaxMsgSize
             raise newLPStreamLimitError()
+
+          trace "pushing data to channel"
           await channel.pushTo(data)
+          trace "pushed data to channel"

         of MessageType.CloseIn, MessageType.CloseOut:
-          logScope:
-            name = channel.name
-            oid = $channel.oid
-
           trace "closing channel"

           await channel.closeRemote()
-          await m.cleanupChann(channel)
-
-          trace "deleted channel"
+          trace "closed channel"
+
         of MessageType.ResetIn, MessageType.ResetOut:
-          logScope:
-            name = channel.name
-            oid = $channel.oid
-
           trace "resetting channel"

           await channel.reset()
-          await m.cleanupChann(channel)
-
-          trace "deleted channel"
+          trace "reset channel"
+
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
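Because `newStreamInternal` is no longer async, the main loop can fetch or create the channel in a single `if` expression: an existing channel is looked up with `getOrDefault(id, nil)`, and a `New` message falls through to channel creation. A toy sketch of that expression-based lookup-or-create; `Chan`, `channels`, `makeChan` and `getOrCreate` are stand-ins, not code from the module:

import std/tables

type Chan = ref object                   # stand-in for LPChannel
  id: uint64

var channels: Table[uint64, Chan]

proc makeChan(id: uint64): Chan =
  result = Chan(id: id)
  channels[id] = result

proc getOrCreate(id: uint64, isNew: bool): Chan =
  # `if` is an expression in Nim, so each branch simply yields the channel
  if not isNew:
    let tmp = channels.getOrDefault(id, nil)
    if tmp == nil:
      raise newException(KeyError, "channel not found")
    tmp
  else:
    makeChan(id)

when isMainModule:
  discard getOrCreate(1, isNew = true)
  doAssert getOrCreate(1, isNew = false).id == 1'u64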
@@ -221,45 +187,41 @@ method handle*(m: Mplex) {.async, gcsafe.} =

 proc init*(M: type Mplex,
            conn: Connection,
-           maxChanns: uint = MaxChannels,
            inTimeout, outTimeout: Duration = DefaultChanTimeout,
            maxChannCount: int = MaxChannelCount): Mplex =
   M(connection: conn,
-    maxChannels: maxChanns,
     inChannTimeout: inTimeout,
     outChannTimeout: outTimeout,
-    remote: initTable[uint64, LPChannel](),
-    local: initTable[uint64, LPChannel](),
     oid: genOid(),
     maxChannCount: maxChannCount)

 method newStream*(m: Mplex,
                   name: string = "",
                   lazy: bool = false): Future[Connection] {.async, gcsafe.} =
-  let channel = await m.newStreamInternal(
+  let channel = m.newStreamInternal(
     lazy = lazy, timeout = m.inChannTimeout)

   if not lazy:
     await channel.open()

-  asyncCheck m.cleanupChann(channel)
   return Connection(channel)

 method close*(m: Mplex) {.async, gcsafe.} =
   if m.isClosed:
     return

-  defer:
-    m.remote.clear()
-    m.local.clear()
-    m.isClosed = true
+  trace "closing mplex muxer", moid = $m.oid
+  m.isClosed = true

-  trace "closing mplex muxer", oid = $m.oid
-  let channs = toSeq(m.remote.values) &
-               toSeq(m.local.values)
+  let channs = toSeq(m.channels[false].values) & toSeq(m.channels[true].values)

   for chann in channs:
     await chann.reset()
-    await m.cleanupChann(chann)

   await m.connection.close()
+
+  # TODO while we're resetting, new channels may be created that will not be
+  # closed properly
+  m.channels[false].clear()
+  m.channels[true].clear()
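`close` now snapshots the channels from both halves of the array before resetting them, since the cleanup tasks triggered by a reset mutate the tables as channels finish, and only clears the tables at the end. A simplified, synchronous sketch of that shutdown order; `Chan`, `resetChan` and `closeAll` are stand-ins for the real async code:

import std/[tables, sequtils]

type Chan = ref object                   # stand-in for LPChannel
  id: uint64
  closed: bool

proc resetChan(c: Chan) = c.closed = true   # stand-in for the async reset

var channels: array[bool, Table[uint64, Chan]]

proc closeAll() =
  # snapshot both tables first: entries may be deleted while channels shut
  # down, and a table must not be mutated while it is being iterated
  let channs = toSeq(channels[false].values) & toSeq(channels[true].values)
  for chann in channs:
    chann.resetChan()
  channels[false].clear()
  channels[true].clear()

when isMainModule:
  channels[true][1] = Chan(id: 1)
  channels[false][2] = Chan(id: 2)
  closeAll()
  doAssert channels[true].len == 0 and channels[false].len == 0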
@@ -11,7 +11,6 @@ import chronos

 # https://github.com/libp2p/specs/tree/master/mplex#writing-to-a-stream
 const MaxMsgSize* = 1 shl 20 # 1mb
-const MaxChannels* = 1000
 const MplexCodec* = "/mplex/6.7.0"
 const MaxReadWriteTime* = 5.seconds

@@ -204,7 +204,7 @@ proc drainBuffer*(s: BufferStream) {.async.} =
   ## wait for all data in the buffer to be consumed
   ##

-  trace "draining buffer", len = s.len
+  trace "draining buffer", len = s.len, oid = $s.oid
   while s.len > 0:
     await s.dataReadEvent.wait()
     s.dataReadEvent.clear()
@@ -306,7 +306,8 @@ method close*(s: BufferStream) {.async, gcsafe.} =
       inc getBufferStreamTracker().closed
       trace "bufferstream closed", oid = $s.oid
     else:
-      trace "attempt to close an already closed bufferstream", trace = getStackTrace()
+      trace "attempt to close an already closed bufferstream",
+        trace = getStackTrace(), oid = $s.oid
   except CancelledError as exc:
     raise exc
   except CatchableError as exc: