nim-libp2p-experimental/libp2p/muxers/mplex/mplex.nim

## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
## TODO:
## Timeouts and message limits are still missing;
## they need to be added ASAP.
import tables, sequtils, options
import chronos, chronicles
import ../muxer,
       ../../connection,
       ../../stream/lpstream,
       coder,
       types,
       lpchannel

logScope:
  topic = "Mplex"

const DefaultRWTimeout = InfiniteDuration
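# `DefaultRWTimeout` is not applied anywhere yet (see the TODO above).
# A minimal sketch of how a read timeout might eventually be applied in the
# main loop, assuming chronos' `wait(fut, timeout)` helper; this is an
# illustration only, not part of the current implementation:
#
#   let (id, msgType, data) = await m.connection.readMsg().wait(DefaultRWTimeout)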

type
  Mplex* = ref object of Muxer
    remote*: Table[uint, LPChannel]
    local*: Table[uint, LPChannel]
    currentId*: uint
    maxChannels*: uint

proc getChannelList(m: Mplex, initiator: bool): var Table[uint, LPChannel] =
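  ## pick the channel table for locally-initiated (`initiator`) or
  ## remotely-initiated channels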
  if initiator:
    trace "picking local channels", initiator = initiator
    result = m.local
  else:
    trace "picking remote channels", initiator = initiator
    result = m.remote

proc newStreamInternal*(m: Mplex,
                        initiator: bool = true,
                        chanId: uint = 0,
                        name: string = "",
                        lazy: bool = false):
                        Future[LPChannel] {.async, gcsafe.} =
  ## create a new channel/stream
  let id = if initiator: m.currentId.inc(); m.currentId else: chanId
  trace "creating new channel", channelId = id, initiator = initiator
  result = newChannel(id, m.connection, initiator, name, lazy = lazy)
  m.getChannelList(initiator)[id] = result

proc cleanupChann(m: Mplex, chann: LPChannel, initiator: bool) {.async, inline.} =
  ## call the channel's `close` to signal the
  ## remote that the channel is closing
  if not isNil(chann) and not chann.closed:
    await chann.close()
    await chann.cleanUp()
    m.getChannelList(initiator).del(chann.id)
    trace "cleaned up channel", id = chann.id

method handle*(m: Mplex) {.async, gcsafe.} =
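  ## main dispatch loop: read mplex messages from the underlying connection
  ## and route them to the matching channel until the connection closes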
  trace "starting mplex main loop"
  try:
    while not m.connection.closed:
      trace "waiting for data"
      let (id, msgType, data) = await m.connection.readMsg()
      trace "read message from connection", id = id,
                                            msgType = msgType,
                                            data = data
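
      # the low bit of the message type tells us whether the message targets a
      # locally-initiated (odd) or remotely-initiated (even) channel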
      let initiator = bool(ord(msgType) and 1)
      var channel: LPChannel
      if MessageType(msgType) != MessageType.New:
        let channels = m.getChannelList(initiator)
        if id notin channels:
          trace "Channel not found, skipping", id = id,
                                               initiator = initiator,
                                               msg = msgType
          continue
        channel = channels[id]

      case msgType:
        of MessageType.New:
          let name = cast[string](data)
          channel = await m.newStreamInternal(false, id, name)
          trace "created channel", id = id, name = name, initiator = true
          if not isNil(m.streamHandler):
            let stream = newConnection(channel)
            stream.peerInfo = m.connection.peerInfo
            # cleanup channel once handler is finished
            # stream.closeEvent.wait().addCallback(
            #   proc(udata: pointer) =
            #     asyncCheck cleanupChann(m, channel, initiator))
            asyncCheck m.streamHandler(stream)
            continue
        of MessageType.MsgIn, MessageType.MsgOut:
          trace "pushing data to channel", id = id,
                                           initiator = initiator,
                                           msgType = msgType,
                                           size = data.len
          if data.len > MaxMsgSize:
            raise newLPStreamLimitError()
          await channel.pushTo(data)
        of MessageType.CloseIn, MessageType.CloseOut:
          trace "closing channel", id = id,
                                   initiator = initiator,
                                   msgType = msgType
          await channel.closedByRemote()
          # m.getChannelList(initiator).del(id)
        of MessageType.ResetIn, MessageType.ResetOut:
          trace "resetting channel", id = id,
                                     initiator = initiator,
                                     msgType = msgType
          await channel.resetByRemote()
          m.getChannelList(initiator).del(id)
          break
  except CatchableError as exc:
    trace "exception occurred", exception = exc.msg
  finally:
    trace "stopping mplex main loop"
    if not m.connection.closed():
      await m.connection.close()

proc newMplex*(conn: Connection,
               maxChanns: uint = MaxChannels): Mplex =
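  ## create a Mplex muxer over ``conn``; the muxer is closed automatically
  ## once the underlying connection closes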
  new result
  result.connection = conn
  result.maxChannels = maxChanns
  result.remote = initTable[uint, LPChannel]()
  result.local = initTable[uint, LPChannel]()
2019-09-03 20:40:51 +00:00
let m = result
2020-01-07 08:02:37 +00:00
conn.closeEvent.wait()
.addCallback do (udata: pointer):
trace "connection closed, cleaning up mplex"
asyncCheck m.close()

method newStream*(m: Mplex,
                  name: string = "",
                  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
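  ## open a new outgoing stream; when ``lazy`` is true the channel is not
  ## opened (announced to the remote) right away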
  let channel = await m.newStreamInternal(lazy = lazy)
  if not lazy:
    await channel.open()
  result = newConnection(channel)
  result.peerInfo = m.connection.peerInfo

method close*(m: Mplex) {.async, gcsafe.} =
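  ## reset all locally- and remotely-initiated channels and shut down the muxer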
  trace "closing mplex muxer"
  await allFutures(@[allFutures(toSeq(m.remote.values).mapIt(it.reset())),
                     allFutures(toSeq(m.local.values).mapIt(it.reset()))])
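
# Typical wiring, shown here as a hedged sketch only: `upgradedConn` and
# `handleMplexStream` are hypothetical placeholders, and the exact stream
# write API may differ from what is shown.
#
#   let mplex = newMplex(upgradedConn)        # muxer over a secured connection
#   mplex.streamHandler = handleMplexStream   # called for remotely-opened streams
#   asyncCheck mplex.handle()                 # run the read/dispatch loop
#
#   let stream = await mplex.newStream()      # open an outgoing stream
#   await stream.write(cast[seq[byte]]("hello mplex"))
#   await mplex.close()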