# tests/testmplex.nim -- unit tests for the mplex stream muxer
# (header coding, e2e read/write over TCP, and channel lifecycle).
import unittest, sequtils, sugar, strformat, options
import chronos, nimcrypto/utils, chronicles
import ../libp2p/[connection,
                  stream/lpstream,
                  stream/bufferstream,
                  transports/tcptransport,
                  transports/transport,
                  protocols/identify,
                  multiaddress,
                  muxers/mplex/mplex,
                  muxers/mplex/coder,
                  muxers/mplex/types,
                  muxers/mplex/lpchannel]

when defined(nimHasUsed): {.used.}
suite "Mplex":
  test "encode header with channel id 0":
    ## A New-stream message on channel 0 must encode as varint header
    ## 0x00 (id 0 << 3 | New) followed by the length-prefixed name.
    proc testEncodeHeader(): Future[bool] {.async.} =
      proc encHandler(msg: seq[byte]) {.async.} =
        check msg == fromHex("000873747265616d2031")

      let buff = newBufferStream(encHandler)
      let conn = newConnection(buff)
      await conn.writeMsg(0, MessageType.New, cast[seq[byte]]("stream 1"))
      result = true

    check:
      waitFor(testEncodeHeader()) == true
test "encode header with channel id other than 0":
2019-09-04 01:43:57 +00:00
proc testEncodeHeader(): Future[bool] {.async.} =
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("88010873747265616d2031")
let stream = newBufferStream(encHandler)
let conn = newConnection(stream)
2019-09-12 02:10:38 +00:00
await conn.writeMsg(17, MessageType.New, cast[seq[byte]]("stream 1"))
2019-09-04 01:43:57 +00:00
result = true
2019-09-03 20:40:51 +00:00
2019-09-04 01:43:57 +00:00
check:
waitFor(testEncodeHeader()) == true
2019-09-03 20:40:51 +00:00
test "encode header and body with channel id 0":
proc testEncodeHeaderBody(): Future[bool] {.async.} =
var step = 0
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("020873747265616d2031")
let stream = newBufferStream(encHandler)
let conn = newConnection(stream)
2019-09-12 02:10:38 +00:00
await conn.writeMsg(0, MessageType.MsgOut, cast[seq[byte]]("stream 1"))
result = true
check:
waitFor(testEncodeHeaderBody()) == true
test "encode header and body with channel id other than 0":
proc testEncodeHeaderBody(): Future[bool] {.async.} =
var step = 0
proc encHandler(msg: seq[byte]) {.async.} =
check msg == fromHex("8a010873747265616d2031")
let stream = newBufferStream(encHandler)
let conn = newConnection(stream)
2019-09-12 02:10:38 +00:00
await conn.writeMsg(17, MessageType.MsgOut, cast[seq[byte]]("stream 1"))
await conn.close()
result = true
check:
waitFor(testEncodeHeaderBody()) == true
test "decode header with channel id 0":
2019-09-04 01:43:57 +00:00
proc testDecodeHeader(): Future[bool] {.async.} =
PubSub (Gossip & Flood) Implementation (#36) This adds gossipsub and floodsub, as well as basic interop testing with the go libp2p daemon. * add close event * wip: gossipsub * splitting rpc message * making message handling more consistent * initial gossipsub implementation * feat: nim 1.0 cleanup * wip: gossipsub protobuf * adding encoding/decoding of gossipsub messages * add disconnect handler * add proper gossipsub msg handling * misc: cleanup for nim 1.0 * splitting floodsub and gossipsub tests * feat: add mesh rebalansing * test pubsub * add mesh rebalansing tests * testing mesh maintenance * finishing mcache implementatin * wip: commenting out broken tests * wip: don't run heartbeat for now * switchout debug for trace logging * testing gossip peer selection algorithm * test stream piping * more work around message amplification * get the peerid from message * use timed cache as backing store * allow setting timeout in constructor * several changes to improve performance * more through testing of msg amplification * prevent gc issues * allow piping to self and prevent deadlocks * improove floodsub * allow running hook on cache eviction * prevent race conditions * prevent race conditions and improove tests * use hashes as cache keys * removing useless file * don't create a new seq * re-enable pubsub tests * fix imports * reduce number of runs to speed up tests * break out control message processing * normalize sleeps between steps * implement proper transport filtering * initial interop testing * clean up floodsub publish logic * allow dialing without a protocol * adding multiple reads/writes * use protobuf varint in mplex * don't loose conn's peerInfo * initial interop pubsub tests * don't duplicate connections/peers * bring back interop tests * wip: interop * re-enable interop and daemon tests * add multiple read write tests from handlers * don't cleanup channel prematurely * use correct channel to send/receive msgs * adjust tests with latest changes * 
include interop tests * remove temp logging output * fix ci * use correct public key serialization * additional tests for pubsub interop
2019-12-06 02:16:18 +00:00
let stream = newBufferStream()
let conn = newConnection(stream)
await stream.pushTo(fromHex("000873747265616d2031"))
2019-09-08 06:32:41 +00:00
let msg = await conn.readMsg()
2019-09-03 20:40:51 +00:00
2019-09-08 06:32:41 +00:00
if msg.isSome:
check msg.get().id == 0
check msg.get().msgType == MessageType.New
result = true
2019-09-03 20:40:51 +00:00
2019-09-04 01:43:57 +00:00
check:
waitFor(testDecodeHeader()) == true
test "decode header and body with channel id 0":
proc testDecodeHeader(): Future[bool] {.async.} =
PubSub (Gossip & Flood) Implementation (#36) This adds gossipsub and floodsub, as well as basic interop testing with the go libp2p daemon. * add close event * wip: gossipsub * splitting rpc message * making message handling more consistent * initial gossipsub implementation * feat: nim 1.0 cleanup * wip: gossipsub protobuf * adding encoding/decoding of gossipsub messages * add disconnect handler * add proper gossipsub msg handling * misc: cleanup for nim 1.0 * splitting floodsub and gossipsub tests * feat: add mesh rebalansing * test pubsub * add mesh rebalansing tests * testing mesh maintenance * finishing mcache implementatin * wip: commenting out broken tests * wip: don't run heartbeat for now * switchout debug for trace logging * testing gossip peer selection algorithm * test stream piping * more work around message amplification * get the peerid from message * use timed cache as backing store * allow setting timeout in constructor * several changes to improve performance * more through testing of msg amplification * prevent gc issues * allow piping to self and prevent deadlocks * improove floodsub * allow running hook on cache eviction * prevent race conditions * prevent race conditions and improove tests * use hashes as cache keys * removing useless file * don't create a new seq * re-enable pubsub tests * fix imports * reduce number of runs to speed up tests * break out control message processing * normalize sleeps between steps * implement proper transport filtering * initial interop testing * clean up floodsub publish logic * allow dialing without a protocol * adding multiple reads/writes * use protobuf varint in mplex * don't loose conn's peerInfo * initial interop pubsub tests * don't duplicate connections/peers * bring back interop tests * wip: interop * re-enable interop and daemon tests * add multiple read write tests from handlers * don't cleanup channel prematurely * use correct channel to send/receive msgs * adjust tests with latest changes * 
include interop tests * remove temp logging output * fix ci * use correct public key serialization * additional tests for pubsub interop
2019-12-06 02:16:18 +00:00
let stream = newBufferStream()
let conn = newConnection(stream)
await stream.pushTo(fromHex("021668656C6C6F2066726F6D206368616E6E656C20302121"))
2019-09-08 06:32:41 +00:00
let msg = await conn.readMsg()
2019-09-08 06:32:41 +00:00
if msg.isSome:
check msg.get().id == 0
check msg.get().msgType == MessageType.MsgOut
check cast[string](msg.get().data) == "hello from channel 0!!"
result = true
check:
waitFor(testDecodeHeader()) == true
test "decode header and body with channel id other than 0":
proc testDecodeHeader(): Future[bool] {.async.} =
PubSub (Gossip & Flood) Implementation (#36) This adds gossipsub and floodsub, as well as basic interop testing with the go libp2p daemon. * add close event * wip: gossipsub * splitting rpc message * making message handling more consistent * initial gossipsub implementation * feat: nim 1.0 cleanup * wip: gossipsub protobuf * adding encoding/decoding of gossipsub messages * add disconnect handler * add proper gossipsub msg handling * misc: cleanup for nim 1.0 * splitting floodsub and gossipsub tests * feat: add mesh rebalansing * test pubsub * add mesh rebalansing tests * testing mesh maintenance * finishing mcache implementatin * wip: commenting out broken tests * wip: don't run heartbeat for now * switchout debug for trace logging * testing gossip peer selection algorithm * test stream piping * more work around message amplification * get the peerid from message * use timed cache as backing store * allow setting timeout in constructor * several changes to improve performance * more through testing of msg amplification * prevent gc issues * allow piping to self and prevent deadlocks * improove floodsub * allow running hook on cache eviction * prevent race conditions * prevent race conditions and improove tests * use hashes as cache keys * removing useless file * don't create a new seq * re-enable pubsub tests * fix imports * reduce number of runs to speed up tests * break out control message processing * normalize sleeps between steps * implement proper transport filtering * initial interop testing * clean up floodsub publish logic * allow dialing without a protocol * adding multiple reads/writes * use protobuf varint in mplex * don't loose conn's peerInfo * initial interop pubsub tests * don't duplicate connections/peers * bring back interop tests * wip: interop * re-enable interop and daemon tests * add multiple read write tests from handlers * don't cleanup channel prematurely * use correct channel to send/receive msgs * adjust tests with latest changes * 
include interop tests * remove temp logging output * fix ci * use correct public key serialization * additional tests for pubsub interop
2019-12-06 02:16:18 +00:00
let stream = newBufferStream()
let conn = newConnection(stream)
await stream.pushTo(fromHex("8a011668656C6C6F2066726F6D206368616E6E656C20302121"))
2019-09-08 06:32:41 +00:00
let msg = await conn.readMsg()
2019-09-08 06:32:41 +00:00
if msg.isSome:
check msg.get().id == 17
check msg.get().msgType == MessageType.MsgOut
check cast[string](msg.get().data) == "hello from channel 0!!"
result = true
check:
waitFor(testDecodeHeader()) == true
2019-09-25 22:57:27 +00:00
test "e2e - read/write receiver":
proc testNewStream(): Future[bool] {.async.} =
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
proc connHandler(conn: Connection) {.async, gcsafe.} =
proc handleMplexListen(stream: Connection) {.async, gcsafe.} =
2019-09-25 22:57:27 +00:00
let msg = await stream.readLp()
check cast[string](msg) == "Hello from stream!"
await stream.close()
let mplexListen = newMplex(conn)
mplexListen.streamHandler = handleMplexListen
discard mplexListen.handle()
let transport1: TcpTransport = newTransport(TcpTransport)
discard await transport1.listen(ma, connHandler)
let transport2: TcpTransport = newTransport(TcpTransport)
let conn = await transport2.dial(transport1.ma)
let mplexDial = newMplex(conn)
let stream = await mplexDial.newStream()
let openState = cast[LPChannel](stream.stream).isOpen
await stream.writeLp("Hello from stream!")
await conn.close()
check openState # not lazy
result = true
check:
waitFor(testNewStream()) == true
test "e2e - read/write receiver lazy":
proc testNewStream(): Future[bool] {.async.} =
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
proc connHandler(conn: Connection) {.async, gcsafe.} =
proc handleMplexListen(stream: Connection) {.async, gcsafe.} =
let msg = await stream.readLp()
check cast[string](msg) == "Hello from stream!"
await stream.close()
let mplexListen = newMplex(conn)
mplexListen.streamHandler = handleMplexListen
discard mplexListen.handle()
let transport1: TcpTransport = newTransport(TcpTransport)
discard await transport1.listen(ma, connHandler)
let transport2: TcpTransport = newTransport(TcpTransport)
let conn = await transport2.dial(transport1.ma)
let mplexDial = newMplex(conn)
let stream = await mplexDial.newStream("", true)
let openState = cast[LPChannel](stream.stream).isOpen
2019-09-25 22:57:27 +00:00
await stream.writeLp("Hello from stream!")
await conn.close()
check not openState # assert lazy
2019-09-25 22:57:27 +00:00
result = true
check:
waitFor(testNewStream()) == true
test "e2e - read/write initiator":
proc testNewStream(): Future[bool] {.async.} =
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
proc connHandler(conn: Connection) {.async, gcsafe.} =
proc handleMplexListen(stream: Connection) {.async, gcsafe.} =
2019-09-25 22:57:27 +00:00
await stream.writeLp("Hello from stream!")
await stream.close()
let mplexListen = newMplex(conn)
mplexListen.streamHandler = handleMplexListen
await mplexListen.handle()
let transport1: TcpTransport = newTransport(TcpTransport)
discard await transport1.listen(ma, connHandler)
let transport2: TcpTransport = newTransport(TcpTransport)
let conn = await transport2.dial(transport1.ma)
let mplexDial = newMplex(conn)
let dialFut = mplexDial.handle()
let stream = await mplexDial.newStream("DIALER")
let msg = cast[string](await stream.readLp())
check msg == "Hello from stream!"
await conn.close()
# await dialFut
result = true
check:
waitFor(testNewStream()) == true
test "e2e - multiple streams":
proc testNewStream(): Future[bool] {.async.} =
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
var count = 1
var listenConn: Connection
proc connHandler(conn: Connection) {.async, gcsafe.} =
proc handleMplexListen(stream: Connection) {.async, gcsafe.} =
2019-09-25 22:57:27 +00:00
let msg = await stream.readLp()
check cast[string](msg) == &"stream {count}!"
count.inc
await stream.close()
listenConn = conn
let mplexListen = newMplex(conn)
mplexListen.streamHandler = handleMplexListen
await mplexListen.handle()
let transport1: TcpTransport = newTransport(TcpTransport)
discard await transport1.listen(ma, connHandler)
let transport2: TcpTransport = newTransport(TcpTransport)
let conn = await transport2.dial(transport1.ma)
let mplexDial = newMplex(conn)
for i in 1..<10:
let stream = await mplexDial.newStream()
await stream.writeLp(&"stream {i}!")
await stream.close()
await conn.close()
await listenConn.close()
result = true
check:
waitFor(testNewStream()) == true
test "e2e - multiple read/write streams":
proc testNewStream(): Future[bool] {.async.} =
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0")
var count = 1
var listenFut: Future[void]
var listenConn: Connection
proc connHandler(conn: Connection) {.async, gcsafe.} =
listenConn = conn
proc handleMplexListen(stream: Connection) {.async, gcsafe.} =
2019-09-25 22:57:27 +00:00
let msg = await stream.readLp()
check cast[string](msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
count.inc
await stream.close()
let mplexListen = newMplex(conn)
mplexListen.streamHandler = handleMplexListen
listenFut = mplexListen.handle()
listenFut.addCallback(proc(udata: pointer) {.gcsafe.}
= trace "completed listener")
2019-09-25 22:57:27 +00:00
let transport1: TcpTransport = newTransport(TcpTransport)
asyncCheck transport1.listen(ma, connHandler)
let transport2: TcpTransport = newTransport(TcpTransport)
let conn = await transport2.dial(transport1.ma)
let mplexDial = newMplex(conn)
let dialFut = mplexDial.handle()
dialFut.addCallback(proc(udata: pointer = nil) {.gcsafe.}
= trace "completed dialer")
2019-09-25 22:57:27 +00:00
for i in 1..10:
let stream = await mplexDial.newStream("dialer stream")
await stream.writeLp(&"stream {i} from dialer!")
let msg = await stream.readLp()
check cast[string](msg) == &"stream {i} from listener!"
await stream.close()
await conn.close()
await listenConn.close()
await allFutures(dialFut, listenFut)
result = true
check:
waitFor(testNewStream()) == true
2019-09-08 06:32:41 +00:00
2019-09-04 06:40:11 +00:00
test "half closed - channel should close for write":
proc testClosedForWrite(): Future[void] {.async.} =
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let chann = newChannel(1, newConnection(newBufferStream(writeHandler)), true)
2019-09-04 06:40:11 +00:00
await chann.close()
await chann.write("Hello")
expect LPStreamEOFError:
2019-09-04 06:40:11 +00:00
waitFor(testClosedForWrite())
test "half closed - channel should close for read by remote":
2019-09-04 06:40:11 +00:00
proc testClosedForRead(): Future[void] {.async.} =
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let chann = newChannel(1, newConnection(newBufferStream(writeHandler)), true)
2019-09-04 06:40:11 +00:00
2019-09-12 02:10:38 +00:00
await chann.pushTo(cast[seq[byte]]("Hello!"))
await chann.closedByRemote()
discard await chann.read() # this should work, since there is data in the buffer
discard await chann.read() # this should throw
2019-09-04 06:40:11 +00:00
expect LPStreamEOFError:
2019-09-04 06:40:11 +00:00
waitFor(testClosedForRead())
test "reset - channel should fail reading":
proc testResetRead(): Future[void] {.async.} =
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let chann = newChannel(1, newConnection(newBufferStream(writeHandler)), true)
2019-09-04 06:40:11 +00:00
await chann.reset()
asyncDiscard chann.read()
expect LPStreamEOFError:
2019-09-04 06:40:11 +00:00
waitFor(testResetRead())
test "reset - channel should fail writing":
proc testResetWrite(): Future[void] {.async.} =
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let chann = newChannel(1, newConnection(newBufferStream(writeHandler)), true)
2019-09-04 06:40:11 +00:00
await chann.reset()
await chann.write(cast[seq[byte]]("Hello!"))
2019-09-04 06:40:11 +00:00
expect LPStreamEOFError:
2019-09-04 06:40:11 +00:00
waitFor(testResetWrite())
2019-09-04 06:51:16 +00:00
test "should not allow pushing data to channel when remote end closed":
proc testResetWrite(): Future[void] {.async.} =
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
let chann = newChannel(1, newConnection(newBufferStream(writeHandler)), true)
2019-09-04 16:40:05 +00:00
await chann.closedByRemote()
2019-09-04 06:51:16 +00:00
await chann.pushTo(@[byte(1)])
expect LPStreamEOFError:
2019-09-04 06:51:16 +00:00
waitFor(testResetWrite())