# nim-libp2p/tests/testtransport.nim

{.used.}
import sequtils
import chronos, stew/byteutils
import ../libp2p/[stream/connection,
                  transports/transport,
                  transports/tcptransport,
                  multiaddress,
                  errors,
                  wire]
import ./helpers

suite "TCP transport":
  teardown:
    checkTrackers()
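
  # Listener writes: the accept handler sends "Hello!" over the accepted
  # connection, and a raw chronos client reads and verifies the bytes.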
asyncTest "test listener: handle write":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport: TcpTransport = TcpTransport.init()
asyncCheck transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport.accept()
await conn.write("Hello!")
await conn.close()
let handlerWait = acceptHandler()
let streamTransport = await connect(transport.ma)
    let msg = await streamTransport.read(6)

    await handlerWait.wait(5000.millis) # when no issues will not wait that long!

    await streamTransport.closeWait()
    await transport.stop()

    check string.fromBytes(msg) == "Hello!"
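
  # Listener reads: the accept handler reads exactly six bytes sent by a
  # raw chronos client and checks the payload.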
asyncTest "test listener: handle read":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport: TcpTransport = TcpTransport.init()
asyncCheck transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
var msg = newSeq[byte](6)
let conn = await transport.accept()
await conn.readExactly(addr msg[0], 6)
check string.fromBytes(msg) == "Hello!"
await conn.close()
let handlerWait = acceptHandler()
let streamTransport: StreamTransport = await connect(transport.ma)
let sent = await streamTransport.write("Hello!")
await handlerWait.wait(5000.millis) # when no issues will not wait that long!
await streamTransport.closeWait()
await transport.stop()
check sent == 6
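
  # Dialer side, write: a plain chronos StreamServer writes the greeting;
  # the TcpTransport dials it and reads the bytes back.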
asyncTest "test dialer: handle write":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
var wstream = newAsyncStreamWriter(transp)
await wstream.write("Hello!")
await wstream.finish()
await wstream.closeWait()
await transp.closeWait()
server.stop()
server.close()
handlerWait.complete()
var server = createStreamServer(address, serveClient, {ReuseAddr})
server.start()
let ma: MultiAddress = MultiAddress.init(server.sock.getLocalAddress()).tryGet()
let transport: TcpTransport = TcpTransport.init()
let conn = await transport.dial(ma)
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
check string.fromBytes(msg) == "Hello!"
await handlerWait.wait(5000.millis) # when no issues will not wait that long!
await conn.close()
await transport.stop()
server.stop()
server.close()
await server.join()
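
  # Dialer side, read: the TcpTransport dials a plain chronos StreamServer
  # and writes the greeting; the server-side handler reads and verifies it.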
asyncTest "test dialer: handle write":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
var rstream = newAsyncStreamReader(transp)
let msg = await rstream.read(6)
check string.fromBytes(msg) == "Hello!"
await rstream.closeWait()
await transp.closeWait()
server.stop()
server.close()
handlerWait.complete()
var server = createStreamServer(address, serveClient, {ReuseAddr})
server.start()
let ma: MultiAddress = MultiAddress.init(server.sock.getLocalAddress()).tryGet()
let transport: TcpTransport = TcpTransport.init()
let conn = await transport.dial(ma)
await conn.write("Hello!")
await handlerWait.wait(5000.millis) # when no issues will not wait that long!
await conn.close()
await transport.stop()
server.stop()
server.close()
await server.join()
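
  # e2e write: one TcpTransport accepts and writes the greeting; a second
  # TcpTransport dials and reads it.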
asyncTest "e2e: handle write":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1: TcpTransport = TcpTransport.init()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport1.accept()
await conn.write("Hello!")
await conn.close()
let handlerWait = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let conn = await transport2.dial(transport1.ma)
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
await handlerWait.wait(5000.millis) # when no issues will not wait that long!
await conn.close()
await transport2.stop()
await transport1.stop()
check string.fromBytes(msg) == "Hello!"
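
  # e2e read: the accepting TcpTransport reads and verifies the greeting
  # written by the dialing TcpTransport.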
asyncTest "e2e: handle read":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1: TcpTransport = TcpTransport.init()
asyncCheck transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport1.accept()
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
check string.fromBytes(msg) == "Hello!"
await conn.close()
let handlerWait = acceptHandler()
let transport2: TcpTransport = TcpTransport.init()
let conn = await transport2.dial(transport1.ma)
await conn.write("Hello!")
await handlerWait.wait(5000.millis) # when no issues will not wait that long!
    await conn.close()
    await transport2.stop()
    await transport1.stop()
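
  # Cancelling an in-flight dial must complete cleanly: the returned future
  # ends up in the cancelled state and both transports still stop.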
asyncTest "e2e: handle dial cancellation":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1: TcpTransport = TcpTransport.init()
await transport1.start(ma)
let transport2: TcpTransport = TcpTransport.init()
let cancellation = transport2.dial(transport1.ma)
    await cancellation.cancelAndWait()
    check cancellation.cancelled

    await transport2.stop()
    await transport1.stop()
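
  # Likewise, cancelling a pending accept must complete cleanly and leave
  # the transport stoppable.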
asyncTest "e2e: handle accept cancellation":
let ma: MultiAddress = Multiaddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
let transport1: TcpTransport = TcpTransport.init()
await transport1.start(ma)
let acceptHandler = transport1.accept()
    await acceptHandler.cancelAndWait()
    check acceptHandler.cancelled

    await transport1.stop()