# nim-libp2p/libp2p/muxers/mplex/coder.nim
## Nim-LibP2P
## Copyright (c) 2019 Status Research & Development GmbH
## Licensed under either of
## * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
## * MIT license ([LICENSE-MIT](LICENSE-MIT))
## at your option.
## This file may not be copied, modified, or distributed except according to
## those terms.
import chronos
import nimcrypto/utils, chronicles
import types,
       ../../connection,
       ../../varint,
       ../../vbuffer,
       ../../stream/lpstream
logScope:
  topic = "MplexCoder"

# Default per-channel buffer size: 1 MiB (1 shl 20 bytes).
const DefaultChannelSize* = 1 shl 20

type
  # One decoded mplex frame, as produced by readMsg.
  Msg* = tuple
    id: uint64           # mplex stream id (header varint shr 3)
    msgType: MessageType # frame type (low 3 bits of the header varint)
    data: seq[byte]      # raw payload bytes
proc readMplexVarint(conn: Connection): Future[uint64] {.async, gcsafe.} =
  ## Read one protobuf-style unsigned varint from ``conn``, one byte at a
  ## time, retrying the decode after each byte.
  ## Raises an invalid-varint error when no valid varint is decoded within
  ## 10 bytes, and an invalid-varint-size error when the decoded value
  ## exceeds ``DefaultReadSize``. Re-raises ``LPStreamIncompleteError``
  ## from the underlying read after logging it.
  var
    varint: uint
    length: int
    res: VarintStatus
    buffer = newSeq[byte](10)  # 10 bytes: maximum encoded length of a 64-bit varint
  try:
    for i in 0..<len(buffer):
      # Pull exactly one more byte, then attempt a decode over all bytes so far.
      await conn.readExactly(addr buffer[i], 1)
      res = PB.getUVarint(buffer.toOpenArray(0, i), length, varint)
      if res == VarintStatus.Success:
        break
    if res != VarintStatus.Success:
      raise newInvalidVarintException()
    # Guard against unreasonably large frames before anyone allocates for them.
    if varint.int > DefaultReadSize:
      raise newInvalidVarintSizeException()
    return varint
  except LPStreamIncompleteError as exc:
    trace "unable to read varint", exc = exc.msg
    raise exc
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
  ## Read a single mplex frame from ``conn``: a header varint carrying the
  ## stream id and message type, a payload-length varint, then the payload.
  let header = await conn.readMplexVarint()
  trace "read header varint", varint = header

  let payloadLen = await conn.readMplexVarint()
  trace "read data len varint", varint = payloadLen

  var payload = newSeq[byte](payloadLen.int)
  if payload.len > 0:
    await conn.readExactly(addr payload[0], payload.len)
  trace "read data", data = payload.len

  # Header layout: upper bits are the stream id, low 3 bits the frame type.
  result = (uint64(header shr 3), MessageType(header and 0x7), payload)
proc writeMsg*(conn: Connection,
               id: uint64,
               msgType: MessageType,
               data: seq[byte] = @[]) {.async, gcsafe.} =
  ## Encode and send one mplex frame on ``conn``: a header varint
  ## ``(id shl 3) or msgType``, a payload-length varint, then the raw
  ## payload bytes.
  trace "sending data over mplex", id,  # fixed typo: was "seding"
                                   msgType,
                                   data = data.len
  ## write length prefixed
  var buf = initVBuffer()
  buf.writePBVarint(id shl 3 or ord(msgType).uint)  # header: stream id + frame type
  buf.writePBVarint(data.len().uint) # size should be always sent
  buf.finish()
  try:
    await conn.write(buf.buffer & data)
  except LPStreamIncompleteError as exc:
    # NOTE(review): send failures are logged and swallowed (best-effort);
    # callers cannot observe a failed write — confirm this is intentional.
    trace "unable to send message", exc = exc.msg
proc writeMsg*(conn: Connection,
               id: uint64,
               msgType: MessageType,
               data: string) {.async, gcsafe.} =
  ## Convenience overload: send a string payload by reinterpreting its
  ## bytes as ``seq[byte]`` (zero-copy cast).
  # Bug fix: the original assigned the inner Future to ``result``, which is
  # invalid inside a ``Future[void]`` async proc; await the inner call so
  # completion and errors propagate to this proc's future.
  await conn.writeMsg(id, msgType, cast[seq[byte]](data))