Merge branch 'unstable' into dev/etan/zz-dbg

Etan Kissling 2024-01-25 16:40:36 +01:00
commit 65d57c0aff
88 changed files with 1115 additions and 780 deletions

.github/workflows/daily.yml

@ -0,0 +1,12 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0']"
cpu: "['amd64']"

.github/workflows/daily_common.yml

@ -0,0 +1,84 @@
name: daily-common
on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
cpu:
description: 'CPU'
required: true
type: string
exclude:
description: 'Exclude matrix configurations'
required: false
type: string
default: "[]"
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
platform:
- os: linux
builder: ubuntu-20
shell: bash
- os: macos
builder: macos-12
shell: bash
- os: windows
builder: windows-2019
shell: msys2 {0}
branch: ${{ fromJSON(inputs.nim-branch) }}
cpu: ${{ fromJSON(inputs.cpu) }}
exclude: ${{ fromJSON(inputs.exclude) }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi

.github/workflows/daily_i386.yml

@ -0,0 +1,13 @@
name: Daily i386
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0', 'devel']"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"

.github/workflows/daily_nim_devel.yml

@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['devel']"
cpu: "['amd64']"


@ -52,3 +52,17 @@ jobs:
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json


@ -1,82 +0,0 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6, version-2-0, devel]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '~1.15.5'
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi


@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port
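
A note on the pervasive change of {.async, gcsafe.} to {.async.} throughout this commit: chronos' async macro already marks the transformed proc as gcsafe, so the explicit pragma is redundant. A minimal sketch of the equivalence, assuming current chronos semantics:

proc handle(conn: Connection, proto: string) {.async.} =
  # the async transformation already injects gcsafe; no explicit pragma needed
  await conn.close()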


@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()


@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()


@ -108,7 +108,7 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16


@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()


@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that it is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()


@ -17,7 +17,7 @@ requires "nim >= 1.6.0",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & <= 0.1.0"
"unittest2"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)


@ -25,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility
@ -59,6 +59,7 @@ type
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@ -121,8 +122,8 @@ proc withMplex*(
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(b: SwitchBuilder): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux built multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@ -201,6 +202,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [LPError], public.} =
@ -223,8 +228,13 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
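
To illustrate the two new builder hooks above, a minimal hedged sketch (builder procs as defined elsewhere in nim-libp2p; the window size is an arbitrary example value):

let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withNoise()
  .withYamux(windowSize = 512_000)                     # custom initial receive window
  .withObservedAddrManager(ObservedAddrManager.new())  # injected instead of Identify's default
  .build()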


@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
@ -379,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
@ -387,7 +387,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
@ -395,7 +395,7 @@ proc getStream*(c: ConnManager,
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##


@ -553,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.


@ -26,7 +26,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async, base.} =
dir = Direction.Out) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##


@ -53,7 +53,7 @@ proc dialAndUpgrade(
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
@ -75,15 +75,19 @@ proc dialAndUpgrade(
let mux =
try:
dialed.transportDir = upgradeDir
await transport.upgrade(dialed, upgradeDir, peerId)
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if upgradeDir == Direction.Out:
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
@ -91,7 +95,7 @@ proc dialAndUpgrade(
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
@ -128,7 +132,7 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId))
@ -146,7 +150,7 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):
return result
@ -164,7 +168,7 @@ proc internalConnect(
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@ -182,7 +186,7 @@ proc internalConnect(
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
@ -209,7 +213,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@ -217,7 +221,7 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,

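The dir parameter threaded through connect and dialAndUpgrade above is what DCUtR uses to flip the recorded direction during a simultaneous dial; a hedged usage sketch matching the dcutr client change later in this commit:

# Hole-punch initiator: dial outward at the transport level, but record
# the upgraded connection as Inbound so both sides don't end up Outbound.
await switch.connect(peerId, addrs, forceDial = true,
                     reuseConnection = false, dir = Direction.In)
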

@ -398,6 +398,9 @@ const
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone
@ -955,7 +958,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
## Initialize empty MultiAddress.
result.data = initVBuffer()
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
protocol: IpTransportProtocol, port: Port): MultiAddress =
var res: MultiAddress
res.data = initVBuffer()
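
With the quic-v1 marker registered above, the ratified QUIC multiaddr form now parses; a small hedged check:

let ma = MultiAddress.init("/ip4/127.0.0.1/udp/4433/quic-v1").tryGet()
doAssert $ma == "/ip4/127.0.0.1/udp/4433/quic-v1"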


@ -193,6 +193,7 @@ const MultiCodecList = [
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list


@ -131,7 +131,7 @@ proc handle*(
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
): Future[string] {.async.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@ -172,10 +172,9 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:


@ -42,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn


@ -73,7 +73,7 @@ func shortLog*(s: LPChannel): auto =
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async.} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@ -95,7 +95,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async.} =
if s.isClosed:
trace "Already closed", s
return
@ -123,7 +123,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
trace "Channel reset", s
method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.


@ -122,7 +122,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async.} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@ -211,7 +211,7 @@ proc new*(M: type Mplex,
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@ -219,7 +219,7 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
if m.isClosed:
trace "Already closed", m
return


@ -46,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard
proc new*(
T: typedesc[MuxerProvider],


@ -22,7 +22,8 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
DefaultWindowSize = 256000
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
@ -59,7 +60,7 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@ -143,6 +144,7 @@ type
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
@ -169,9 +171,14 @@ proc `$`(channel: YamuxChannel): string =
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
# For leniency, cap each big message at a third of maxSendQueueSize.
# This value is arbitrary; it's not in the spec.
# It permits storing up to 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
proc actuallyClose(channel: YamuxChannel) {.async.} =
if channel.closedLocally and channel.sendQueue.len == 0 and
@ -183,9 +190,10 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
@ -249,6 +257,7 @@ method readOnce*(
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
let toRead = min(channel.recvQueue.len, nbytes)
@ -282,9 +291,9 @@ proc trySend(channel: YamuxChannel) {.async.} =
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
debug "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
try:
await channel.reset(true)
except CatchableError as exc:
@ -292,7 +301,7 @@ proc trySend(channel: YamuxChannel) {.async.} =
break
let
bytesAvailable = channel.sendQueueBytes()
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
@ -343,15 +352,18 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
libp2p_yamux_recv_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open*(channel: YamuxChannel) {.async.} =
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
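
A worked example of the window-update delta sent in open() (numbers hypothetical):

const YamuxDefaultWindowSize = 256_000
let maxRecvWindow = 512_000   # custom window passed to createStream
let delta = uint32(max(maxRecvWindow - YamuxDefaultWindowSize, 0))
doAssert delta == 256_000
# the peer already assumes a 256k initial window, so only the surplus is
# advertised; for maxRecvWindow <= 256_000 the delta is 0 and the default applies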
@ -362,6 +374,8 @@ type
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
@ -375,12 +389,19 @@ proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
proc createStream(m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# As you can see, during initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To solve this contradiction, no updateWindow will be sent until recvWindow is less
# than maxRecvWindow
result = YamuxChannel(
id: id,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
@ -429,7 +450,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@ -454,8 +475,9 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
@ -511,19 +533,24 @@ method getStreams*(m: Yamux): seq[Connection] =
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true)
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
m.currentId += 2
if not lazy:
await stream.open()
return stream
proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize
)
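
To make the send-queue cap concrete, a hedged arithmetic sketch (values hypothetical):

import sequtils
let
  maxSendQueueSize = 256_000
  pending = @[200_000, 200_000, 200_000]   # bytes left in three big stalled messages
  limited = pending.foldl(a + min(b, maxSendQueueSize div 3), 0)
doAssert limited == 255_999
# each message counts for at most 256_000 div 3 = 85_333 bytes toward the cap,
# so up to three big messages can sit in the queue before the channel is reset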


@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
# Resolve a single address
var pbuf: array[2, byte]


@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")


@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@ -179,7 +179,7 @@ proc addressMapper(
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"


@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."


@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError].} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100.0/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
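
A hedged example of the filtering above (the peer id is the one from the comment; the addresses are illustrative):

let addrs = @[
  MultiAddress.init("/ip4/198.51.100.0/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
  MultiAddress.init("/ip4/198.51.100.0/udp/1234").tryGet()]   # not TCP: filtered out
echo getHolePunchableAddrs(addrs)
# prints @[/ip4/198.51.100.0/tcp/1234] -- a[0..1] keeps only the ip/tcp prefix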


@ -29,7 +29,7 @@ logScope:
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."


@ -189,7 +189,7 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)


@ -47,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =


@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)


@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async, gcsafe, raises: [].} =
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@ -90,7 +90,7 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)


@ -21,14 +21,14 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))


@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
signedPeerRecord =
# The SPR contains the same data as the identify message;
# it would be cumbersome to log
if iinfo.signedPeerRecord.isSome(): "Some"
if it.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
if ? pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)
debug "decodeMsg: decoded identify", iinfo
Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
observedAddrManager: observedAddrManager,
)
identify.init()
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@ -168,7 +169,7 @@ method init*(p: Identify) =
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
@ -176,6 +177,7 @@ proc identify*(self: Identify,
raise newException(IdentityInvalidMsgError, "Empty message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@ -186,8 +188,12 @@ proc identify*(self: Identify,
info.peerId = peer
info.observedAddr.withValue(observed):
if not self.observedAddrManager.addObservation(observed):
debug "Observed address is not valid", observedAddr = observed
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like ".../p2p-circuit/p2p/..." and "/p2p/..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify push: decoded message", conn, identInfo
identInfo.pubkey.withValue(pubkey):
let receivedPeerId = PeerId.init(pubkey).tryGet()


@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn


@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@ -71,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, gcsafe, public.} =
): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn


@ -384,7 +384,7 @@ proc validateAndRelay(g: GossipSub,
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
# In this way we count even ignored fields by protobuf
var rmsg = rpcMsgOpt.valueOr:


@ -234,7 +234,7 @@ template sendMetrics(msg: RPCMsg): untyped =
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:


@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)


@ -19,7 +19,7 @@ type
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything
p.codec = PlainTextCodec


@ -135,10 +135,9 @@ method init*(s: Secure) =
method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(s: SecureConn,
pbytes: pointer,


@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false


@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():


@ -50,7 +50,7 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async, gcsafe.} =
s.timeoutHandler = proc() {.async.} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()


@ -41,7 +41,7 @@ type
when defined(libp2p_agents_metrics):
shortAgent*: string
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc timeoutMonitor(s: Connection) {.async.}
func shortLog*(conn: Connection): string =
try:
@ -110,7 +110,7 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
return false
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
proc timeoutMonitor(s: Connection) {.async.} =
## monitor the channel for inactivity
##
## if the timeout was hit, it means that


@ -246,7 +246,7 @@ proc readLine*(s: LPStream,
if len(result) == lim:
break
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
var
buffer: array[10, byte]
@ -264,7 +264,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()


@ -71,17 +71,17 @@ type
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
method run*(self: Service, switch: Switch) {.base, async.} =
doAssert(false, "Not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if not self.inUse:
warn "service is already stopped"
return false
@ -141,10 +141,10 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.public.} =
dir = Direction.Out): Future[void] {.public.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
method connect*(
s: Switch,
@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
s.peerInfo.protocols.add(proto.codec)
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
trace "Connection upgrade succeeded"
@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =
trace "Switch stopped"
proc start*(s: Switch) {.async, gcsafe, public.} =
proc start*(s: Switch) {.async, public.} =
## Start listening on every transport
if s.started:


@ -174,7 +174,7 @@ method start*(
trace "Listening on", address = ma
method stop*(self: TcpTransport) {.async, gcsafe.} =
method stop*(self: TcpTransport) {.async.} =
## stop the transport
##
try:
@ -210,7 +210,7 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
except CatchableError as exc:
trace "Error shutting down tcp transport", exc = exc.msg
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TcpTransport): Future[Connection] {.async.} =
## accept a new TCP connection
##
@ -260,7 +260,7 @@ method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##


@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
let transp = await connect(transportAddress)
try:
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
@ -99,7 +99,7 @@ proc connectToTorServer(
await transp.closeWait()
raise err
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
proc readServerReply(transp: StreamTransport) {.async.} =
## The specification for this code is defined in
## [RFC 1928, section 5](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [RFC 1928, section 6](https://www.rfc-editor.org/rfc/rfc1928#section-6).
@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
transp: StreamTransport, address: MultiAddress) {.async.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
@ -190,7 +190,7 @@ method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##
if not handlesDial(address):
@ -229,14 +229,14 @@ method start*(
else:
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TorTransport): Future[Connection] {.async.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
method stop*(self: TorTransport) {.async.} =
## stop the transport
##
await procCall Transport(self).stop() # call base


@ -83,13 +83,12 @@ proc dial*(
method upgrade*(
self: Transport,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
self.upgrader.upgrade(conn, direction, peerId)
self.upgrader.upgrade(conn, peerId)
method handles*(
self: Transport,


@ -173,7 +173,7 @@ method start*(
self.running = true
method stop*(self: WsTransport) {.async, gcsafe.} =
method stop*(self: WsTransport) {.async.} =
## stop the transport
##
@ -237,7 +237,7 @@ proc connHandler(self: WsTransport,
asyncSpawn onClose()
return conn
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: WsTransport): Future[Connection] {.async.} =
## accept a new WS connection
##
@ -295,7 +295,7 @@ method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##


@ -32,8 +32,7 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
proc mux*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction): Future[Muxer] {.async, gcsafe.} =
conn: Connection): Future[Muxer] {.async.} =
## mux connection
trace "Muxing connection", conn
@ -42,7 +41,7 @@ proc mux*(
return
let muxerName =
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
if muxerName.len == 0 or muxerName == "na":
@ -62,16 +61,15 @@ proc mux*(
method upgrade*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction
trace "Upgrading connection", conn, direction = conn.dir
let sconn = await self.secure(conn, direction, peerId) # secure the connection
let sconn = await self.secure(conn, peerId) # secure the connection
if isNil(sconn):
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
let muxer = await self.mux(sconn, direction) # mux it if possible
let muxer = await self.mux(sconn) # mux it if possible
if muxer == nil:
raise newException(UpgradeFailedError,
"a muxer is required for outgoing connections")
@ -84,7 +82,7 @@ method upgrade*(
raise newException(UpgradeFailedError,
"Connection closed or missing peer info, stopping upgrade")
trace "Upgraded connection", conn, sconn, direction
trace "Upgraded connection", conn, sconn, direction = conn.dir
return muxer
proc new*(
@ -98,8 +96,7 @@ proc new*(
secureManagers: @secureManagers,
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
{.async, gcsafe, raises: [].} =
upgrader.streamHandler = proc(conn: Connection) {.async.} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection


@ -40,20 +40,18 @@ type
method upgrade*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
proc secure*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId]): Future[Connection] {.async.} =
if self.secureManagers.len <= 0:
raise newException(UpgradeFailedError, "No secure managers registered!")
let codec =
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
if codec.len == 0:
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@ -65,4 +63,4 @@ proc secure*(
# let's avoid duplicating checks but detect if it fails to do it properly
doAssert(secureProtocol.len > 0)
return await secureProtocol[0].secure(conn, direction == Out, peerId)
return await secureProtocol[0].secure(conn, peerId)

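Both `mux` and `secure` now share one pattern: the side that initiated the connection (`conn.dir == Out`) drives the multistream-select negotiation, while the accepting side answers it. A condensed sketch of that shared shape, reusing names from the diff above — illustrative only, assuming nim-libp2p's `MultistreamSelect` and `Connection` are in scope:

```nim
import chronos
import libp2p/[multistream, stream/connection]

proc negotiate(ms: MultistreamSelect, conn: Connection,
               codecs: seq[string]): Future[string] {.async.} =
  if conn.dir == Direction.Out:
    # dialer: actively propose protocols, in preference order
    return await ms.select(conn, codecs)
  else:
    # listener: answer the remote peer's proposal
    return await MultistreamSelect.handle(conn, codecs)
```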
View File

@ -89,8 +89,27 @@ template exceptionToAssert*(body: untyped): untyped =
res
template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
if self.isSome:
let value {.inject.} = self.get()
## This template provides a convenient way to work with `Opt`/`Option` values.
## It executes a block of code (`body`) only when the option is not empty.
##
## `self` is the `Opt`/`Option` instance being checked.
## `value` is the name bound within `body` to the unwrapped value.
## `body` is a block of code that runs only if `self` contains a value.
##
## Within `body`, `value` is already unwrapped, so no explicit `isSome`
## check or `get()` call is needed.
##
## Example:
## ```nim
## let myOpt = Opt.some(5)
## myOpt.withValue(value):
## echo value # Will print 5
## ```
##
## Note: this is a template, so it is inlined at the call site and incurs no call overhead.
let temp = (self)
if temp.isSome:
let value {.inject.} = temp.get()
body
macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =

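A hedged usage sketch of the two-body form declared above, assuming (consistent with the single-body template) that the second body is the `else` branch taken when the option is empty:

```nim
import results

let maybePort = Opt.none(int)
maybePort.withValue(port):
  echo "listening on ", port
else:
  echo "no port configured"  # taken here, since maybePort is empty
```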
View File

@ -5,21 +5,21 @@ export unittest2, chronos
template asyncTeardown*(body: untyped): untyped =
teardown:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncSetup*(body: untyped): untyped =
setup:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncTest*(name: string, body: untyped): untyped =
test name:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
inc attemptNumber
try:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
except Exception as e:

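These templates let test bodies use `await` directly, without each test spelling out the `waitFor`/`proc` boilerplate. A minimal hedged usage sketch, assuming this helpers module is imported (it re-exports unittest2 and chronos):

```nim
suite "async helpers":
  asyncTest "sleep, then assert":
    await sleepAsync(10.milliseconds)
    check true
```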
View File

@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
buf.finish()
result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
## read a length-prefixed msg
var
size: uint

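`readLp`/`writeLp` frame every message with an unsigned-varint length prefix. A stdlib-only sketch of that encoding, independent of the stream types above and shown only to make the wire format concrete:

```nim
proc putUVarint(buf: var seq[byte], n: uint64) =
  ## append n as an unsigned LEB128 varint (7 data bits per byte)
  var v = n
  while v >= 0x80'u64:
    buf.add(byte(v and 0x7F) or 0x80'u8)
    v = v shr 7
  buf.add(byte(v))

proc frameLp(msg: openArray[byte]): seq[byte] =
  ## a length-prefixed frame: varint(len) followed by the payload
  putUVarint(result, uint64(msg.len))
  result.add(@msg)

assert frameLp([0x68'u8, 0x69]) == @[2'u8, 0x68'u8, 0x69'u8]
```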
View File

@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
if conn.observedAddr.isSome():
check transport1.handles(conn.observedAddr.get())
@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.write("Hello!")
await conn.close()
@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
await conn.write(newSeq[byte](0))
@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.close()

View File

@ -111,7 +111,7 @@ proc bridgedConnections*: (Connection, Connection) =
return (connA, connB)
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
@ -146,8 +146,8 @@ proc default*(T: typedesc[MockResolver]): T =
resolver.ipResponses[("localhost", true)] = @["::1"]
resolver
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc setDNSAddr*(switch: Switch) {.async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
switch.peerInfo.addressMappers.add(addressMapper)
await switch.peerInfo.update()

View File

@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.14 as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

View File

@ -0,0 +1,114 @@
import std/[os, options, strformat]
import redis
import chronos, chronicles
import ../../libp2p/[builders,
switch,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping]
import ../stubs/autonatclientstub
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder.new()
.withRng(rng)
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng=rng))
return s
proc main() {.async.} =
try:
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
# This is necessary to make the autonat service work: it will ask this peer for our reachability, to which the
# autonat client stub will answer NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
debug "Got relay address", relayMA
let relayId = await switch.connect(relayMA)
debug "Connected to relay", relayId
# Wait for our relay address to be published
while switch.peerInfo.addrs.len == 0:
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
quit(0)
except CatchableError as e:
error "Unexpected error", msg = e.msg
discard waitFor(main().withTimeout(4.minutes))
quit(1)

View File

@ -0,0 +1,7 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp"
]
}

View File

@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../helpers
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub(sender, receiver: auto; key: string) {.async.} =
# make things deterministic
# this is for testing purposes only
var ceil = 15
@ -43,7 +43,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe A -> B":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@ -81,7 +81,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe B -> A":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@ -113,7 +113,7 @@ suite "FloodSub":
asyncTest "FloodSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@ -151,7 +151,7 @@ suite "FloodSub":
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should fail":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@ -186,7 +186,7 @@ suite "FloodSub":
asyncTest "FloodSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@ -235,7 +235,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@ -283,7 +283,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@ -333,7 +333,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check data.len < 50
inc(messageReceived)
@ -375,7 +375,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
inc(messageReceived)
let

View File

@ -24,7 +24,7 @@ import utils
import ../helpers
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
proc noop(data: seq[byte]) {.async.} = discard
const MsgIdSuccess = "msg id gen success"
@ -730,10 +730,10 @@ suite "GossipSub internal":
var receivedMessages = new(HashSet[seq[byte]])
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)

View File

@ -47,7 +47,7 @@ suite "GossipSub":
asyncTest "GossipSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@ -92,7 +92,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (reject)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@ -138,7 +138,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (ignore)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@ -185,7 +185,7 @@ suite "GossipSub":
asyncTest "GossipSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@ -238,7 +238,7 @@ suite "GossipSub":
asyncTest "GossipSub unsub - resub faster than backoff":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@ -289,7 +289,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@ -323,7 +323,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@ -374,7 +374,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@ -428,7 +428,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@ -481,7 +481,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over mesh A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@ -548,11 +548,11 @@ suite "GossipSub":
var
aReceived = 0
cReceived = 0
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
inc aReceived
check aReceived < 2
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async.} =
inc cReceived
check cReceived < 2
cRelayed.complete()
@ -596,7 +596,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over floodPublish A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@ -653,7 +653,7 @@ suite "GossipSub":
)
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for node in nodes:
@ -661,7 +661,7 @@ suite "GossipSub":
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
block setup:
@ -727,7 +727,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@ -778,7 +778,7 @@ suite "GossipSub":
var handler: TopicHandler
capture dialer, i:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@ -819,7 +819,7 @@ suite "GossipSub":
# PX to A & C
#
# C sent its SPR, not A
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let
@ -895,9 +895,9 @@ suite "GossipSub":
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
@ -943,7 +943,7 @@ suite "GossipSub":
await subscribeNodes(nodes)
proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handle(topic: string, data: seq[byte]) {.async.} = discard
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])

View File

@ -59,7 +59,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@ -93,7 +93,7 @@ suite "GossipSub":
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@ -155,7 +155,7 @@ suite "GossipSub":
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await subscribeNodes(nodes)
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handler(topic: string, data: seq[byte]) {.async.} = discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
@ -182,10 +182,10 @@ suite "GossipSub":
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", noop)
@ -226,7 +226,7 @@ suite "GossipSub":
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
@ -272,7 +272,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@ -324,7 +324,7 @@ suite "GossipSub":
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0..<runs:
@ -368,7 +368,7 @@ suite "GossipSub":
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await subscribeNodes(nodes)

View File

@ -128,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub*(sender, receiver: auto; key: string) {.async.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
@ -148,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
while true:
var

View File

@ -24,7 +24,7 @@ type
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
dir = Direction.Out): Future[void] {.async.}
method connect*(
self: SwitchStub,
@ -32,11 +32,11 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
if (self.connectStub != nil):
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
else:
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)
proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
return SwitchStub(

View File

@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =
proc makeAutonatServicePrivate(): Switch =
var autonatProtocol = new LPProtocol
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
discard await conn.readLp(1024)
await conn.writeLp(AutonatDialResponse(
status: DialError,

View File

@ -87,7 +87,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
@ -131,7 +131,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Reachable
@ -173,7 +173,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@ -213,7 +213,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Unknown
@ -267,7 +267,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@ -302,12 +302,12 @@ suite "Autonat Service":
let awaiter2 = newFuture[void]()
let awaiter3 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter2.finished:
awaiter2.complete()
@ -345,7 +345,7 @@ suite "Autonat Service":
let awaiter1 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
@ -388,7 +388,7 @@ suite "Autonat Service":
var awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@ -428,7 +428,7 @@ suite "Autonat Service":
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
fail()
check autonatService.networkReachability == NetworkReachability.Unknown

View File

@ -32,7 +32,7 @@ method newStream*(
m: TestMuxer,
name: string = "",
lazy: bool = false):
Future[Connection] {.async, gcsafe.} =
Future[Connection] {.async.} =
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
suite "Connection Manager":

View File

@ -57,14 +57,15 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@ -83,8 +84,8 @@ suite "Dcutr":
body
checkExpiring:
# no connection will be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@ -95,7 +96,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
@ -114,7 +115,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@ -142,13 +143,16 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts instead of one. The server dial is going to fail, but the client dial will succeed.
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@ -159,7 +163,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
await ductrServerTest(connectProc)
@ -171,7 +175,23 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
await ductrServerTest(connectProc)
test "should return valid TCP/IP and TCP/DNS addresses only":
let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
let result = getHolePunchableAddrs(testAddrs)
check result == expected

View File

@ -65,7 +65,7 @@ suite "Hole Punching":
let publicPeerSwitch = createSwitch(RelayClient.new())
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
await publicPeerSwitch.peerInfo.update()
@ -193,38 +193,24 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
privatePeerSwitch2.connectStub = rcvConnectStub
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
# wait for hole punching to finish in the background
await sleepAsync(600.millis)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
await sleepAsync(100.millis) # avoid simultaneous dialing, which causes an address-in-use error
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await holePunchingTest(nil, connectStub, NotReachable)
await holePunchingTest(nil, nil, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
raise newException(CatchableError, "error")

View File

@ -73,7 +73,7 @@ suite "Identify":
asyncTest "default agent version":
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@ -95,7 +95,7 @@ suite "Identify":
remotePeerInfo.agentVersion = customAgentVersion
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@ -136,7 +136,7 @@ suite "Identify":
asyncTest "can send signed peer record":
msListen.addHandler(IdentifyCodec, identifyProto1)
identifyProto1.sendSignedPeerRecord = true
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)

View File

@ -97,7 +97,7 @@ suite "Mplex":
suite "channel half-closed":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -112,7 +112,7 @@ suite "Mplex":
asyncTest "(local close) - should allow reads until remote closes":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@ -139,7 +139,7 @@ suite "Mplex":
asyncTest "(remote close) - channel should close for reading by remote":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@ -162,7 +162,7 @@ suite "Mplex":
let
testData = "Hello!".toBytes
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard
)
chann = LPChannel.init(1, conn, true)
@ -175,7 +175,7 @@ suite "Mplex":
await conn.close()
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -192,7 +192,7 @@ suite "Mplex":
suite "channel reset":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -205,7 +205,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -220,7 +220,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -239,7 +239,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -254,7 +254,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -279,7 +279,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -293,7 +293,7 @@ suite "Mplex":
await conn.close()
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -311,7 +311,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -323,7 +323,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -335,7 +335,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -364,7 +364,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@ -376,7 +376,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
@ -392,11 +392,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@ -429,11 +429,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@ -473,12 +473,12 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
try:
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
@ -520,11 +520,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
await stream.writeLp("Hello from stream!")
await stream.close()
@ -557,12 +557,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count}!"
count.inc
@ -601,12 +601,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
@ -646,12 +646,12 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
@ -697,11 +697,11 @@ suite "Mplex":
var count = 0
var done = newFuture[void]()
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
count.inc()
if count == 10:
@ -761,11 +761,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@ -805,11 +805,11 @@ suite "Mplex":
var mplexListen: Mplex
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@ -851,11 +851,11 @@ suite "Mplex":
var mplexHandle: Future[void]
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@ -896,11 +896,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@ -943,11 +943,11 @@ suite "Mplex":
var listenConn: Connection
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
listenConn = await transport1.accept()
let mplexListen = Mplex.new(listenConn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@ -992,11 +992,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 1024
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
@ -1064,11 +1064,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 512
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
await stream.close()

View File

@ -60,6 +60,7 @@ const
"/ip4/127.0.0.1/tcp/1234",
"/ip4/127.0.0.1/tcp/1234/",
"/ip4/127.0.0.1/udp/1234/quic",
"/ip4/192.168.80.3/udp/33422/quic-v1",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",

View File

@ -34,7 +34,7 @@ type
method readOnce*(s: TestSelectStream,
pbytes: pointer,
nbytes: int): Future[int] {.async, gcsafe.} =
nbytes: int): Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@ -64,9 +64,9 @@ method readOnce*(s: TestSelectStream,
return "\0x3na\n".len()
method write*(s: TestSelectStream, msg: seq[byte]) {.async, gcsafe.} = discard
method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
method close(s: TestSelectStream) {.async, gcsafe.} =
method close(s: TestSelectStream) {.async.} =
s.isClosed = true
s.isEof = true
@ -113,11 +113,11 @@ method readOnce*(s: TestLsStream,
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
method write*(s: TestLsStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.ls(msg)
method close(s: TestLsStream) {.async, gcsafe.} =
method close(s: TestLsStream) {.async.} =
s.isClosed = true
s.isEof = true
@ -137,7 +137,7 @@ type
method readOnce*(s: TestNaStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async, gcsafe.} =
Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@ -167,11 +167,11 @@ method readOnce*(s: TestNaStream,
return "\0x3na\n".len()
method write*(s: TestNaStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.na(string.fromBytes(msg))
method close(s: TestNaStream) {.async, gcsafe.} =
method close(s: TestNaStream) {.async.} =
s.isClosed = true
s.isEof = true
@ -197,7 +197,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.close()
@ -210,7 +210,7 @@ suite "Multistream select":
var conn: Connection = nil
let done = newFuture[void]()
proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} =
proc testLsHandler(proto: seq[byte]) {.async.} =
var strProto: string = string.fromBytes(proto)
check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
await conn.close()
@ -218,7 +218,7 @@ suite "Multistream select":
conn = Connection(newTestLsStream(testLsHandler))
proc testHandler(conn: Connection, proto: string): Future[void]
{.async, gcsafe.} = discard
{.async.} = discard
var protocol: LPProtocol = new LPProtocol
protocol.handler = testHandler
ms.addHandler("/test/proto1/1.0.0", protocol)
@ -230,7 +230,7 @@ suite "Multistream select":
let ms = MultistreamSelect.new()
var conn: Connection = nil
proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
proc testNaHandler(msg: string): Future[void] {.async.} =
check msg == "\x03na\n"
await conn.close()
conn = newTestNaStream(testNaHandler)
@ -238,7 +238,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} = discard
Future[void] {.async.} = discard
protocol.handler = testHandler
ms.addHandler("/unabvailable/proto/1.0.0", protocol)
@ -250,7 +250,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@ -262,7 +262,7 @@ suite "Multistream select":
let transport1 = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
await conn.close()
@ -293,7 +293,7 @@ suite "Multistream select":
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
@ -315,7 +315,7 @@ suite "Multistream select":
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
@ -362,7 +362,7 @@ suite "Multistream select":
let msListen = MultistreamSelect.new()
var protocol: LPProtocol = new LPProtocol
protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
protocol.handler = proc(conn: Connection, proto: string) {.async.} =
# never reached
discard
@ -379,7 +379,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
try:
await msListen.handle(conn)
@ -412,7 +412,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@ -424,7 +424,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
@ -450,7 +450,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await conn.writeLp(&"Hello from {proto}!")
await conn.close()
@ -462,7 +462,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)

View File

@ -41,7 +41,7 @@ type
{.push raises: [].}
method init(p: TestProto) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
@ -100,7 +100,7 @@ suite "Noise":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
try:
await sconn.write("Hello!")
finally:
@ -115,7 +115,7 @@ suite "Noise":
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
var msg = newSeq[byte](6)
await sconn.readExactly(addr msg[0], 6)
@ -140,11 +140,11 @@ suite "Noise":
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var conn: Connection
try:
conn = await transport1.accept()
discard await serverNoise.secure(conn, false, Opt.none(PeerId))
discard await serverNoise.secure(conn, Opt.none(PeerId))
except CatchableError:
discard
finally:
@ -160,7 +160,7 @@ suite "Noise":
var sconn: Connection = nil
expect(NoiseDecryptTagError):
sconn = await clientNoise.secure(conn, true, Opt.some(conn.peerId))
sconn = await clientNoise.secure(conn, Opt.some(conn.peerId))
await conn.close()
await handlerWait
@ -178,9 +178,9 @@ suite "Noise":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
await conn.close()
@ -196,7 +196,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.write("Hello!")
await acceptFut
@ -221,9 +221,9 @@ suite "Noise":
transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
listenFut = transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
let msg = await sconn.readLp(1024*1024)
@ -237,7 +237,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.writeLp(hugePayload)
await readTask

View File

@ -42,7 +42,7 @@ suite "Ping":
transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())
proc handlePing(peer: PeerId) {.async, gcsafe, closure.} =
proc handlePing(peer: PeerId) {.async, closure.} =
inc pingReceivedCount
pingProto1 = Ping.new()
pingProto2 = Ping.new(handlePing)
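
For context, Ping.new optionally takes a callback invoked for every ping received, while ping() performs one round trip and returns its duration. A minimal usage sketch, assuming conn is an open Connection already negotiated for PingCodec:

let rtt = await pingProto1.ping(conn)  # round-trip time of a single ping
echo "ping RTT: ", rtt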
@ -63,7 +63,7 @@ suite "Ping":
asyncTest "simple ping":
msListen.addHandler(PingCodec, pingProto1)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@ -78,7 +78,7 @@ suite "Ping":
asyncTest "ping callback":
msDial.addHandler(PingCodec, pingProto2)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
discard await msListen.select(c, PingCodec)
discard await pingProto1.ping(c)
@ -92,7 +92,7 @@ suite "Ping":
asyncTest "bad ping data ack":
type FakePing = ref object of LPProtocol
let fakePingProto = FakePing()
proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc fakeHandle(conn: Connection, proto: string) {.async, closure.} =
var
buf: array[32, byte]
fakebuf: array[32, byte]
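
The elided body of this fake handler presumably answers the 32-byte ping payload with a different 32-byte buffer, which a conforming client must reject as a bad ack. A sketch of the remainder, assuming the pointer-based readExactly/write overloads used elsewhere in these tests:

await conn.readExactly(addr buf[0], 32)
await conn.write(addr fakebuf[0], 32)  # fakebuf stays zeroed: a deliberate mismatch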
@ -103,7 +103,7 @@ suite "Ping":
msListen.addHandler(PingCodec, fakePingProto)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)

View File

@ -19,14 +19,22 @@ import ./helpers
import std/times
import stew/byteutils
proc createSwitch(r: Relay): Switch =
result = SwitchBuilder.new()
proc createSwitch(r: Relay = nil, useYamux: bool = false): Switch =
var builder = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
if useYamux:
builder = builder.withYamux()
else:
builder = builder.withMplex()
if r != nil:
builder = builder.withCircuitRelay(r)
return builder
.withNoise()
.withCircuitRelay(r)
.build()
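
With the new defaults (r = nil, Mplex) one helper now builds every node in the parameterized suites below; RelayClient is a Relay subtype, so clients slot straight in. A usage sketch:

let srcCl = RelayClient.new()
let src = createSwitch(srcCl, useYamux = true)  # relay client, Yamux-muxed
let rel = createSwitch()                        # plain node: no relay, Mplex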
suite "Circuit Relay V2":
@ -122,7 +130,8 @@ suite "Circuit Relay V2":
expect(ReservationError):
discard await cl1.reserve(src2.peerInfo.peerId, addrs)
suite "Connection":
for (useYamux, muxName) in [(false, "Mplex"), (true, "Yamux")]:
suite "Circuit Relay V2 Connection using " & muxName:
asyncTeardown:
checkTrackers()
var
@ -149,9 +158,9 @@ suite "Circuit Relay V2":
ldata = 16384
srcCl = RelayClient.new()
dstCl = RelayClient.new()
src = createSwitch(srcCl)
dst = createSwitch(dstCl)
rel = newStandardSwitch()
src = createSwitch(srcCl, useYamux)
dst = createSwitch(dstCl, useYamux)
rel = createSwitch(nil, useYamux)
asyncTest "Connection succeed":
proto.handler = proc(conn: Connection, proto: string) {.async.} =
@ -230,20 +239,20 @@ suite "Circuit Relay V2":
await conn.writeLp("do you expect a lorem ipsum or...?")
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
@ -322,7 +331,7 @@ take to the ship.""")
raise newException(CatchableError, "Should not be here")
let
rel2Cl = RelayClient.new(canHop = true)
rel2 = createSwitch(rel2Cl)
rel2 = createSwitch(rel2Cl, useYamux)
rv2 = Relay.new()
rv2.setup(rel)
rel.mount(rv2)
@ -347,6 +356,7 @@ take to the ship.""")
expect(DialFailedError):
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
if not conn.isNil():
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
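
The added isNil guard matters because the dial inside expect(DialFailedError) raises before conn is assigned, and closing a nil Connection during teardown would itself fail. The guarded-cleanup shape, sketched:

var conn: Connection
expect(DialFailedError):
  conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
if not conn.isNil():  # dial raised, so conn may have stayed nil
  await conn.close()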
@ -381,9 +391,9 @@ take to the ship.""")
clientA = RelayClient.new(canHop = true)
clientB = RelayClient.new(canHop = true)
clientC = RelayClient.new(canHop = true)
switchA = createSwitch(clientA)
switchB = createSwitch(clientB)
switchC = createSwitch(clientC)
switchA = createSwitch(clientA, useYamux)
switchB = createSwitch(clientB, useYamux)
switchC = createSwitch(clientC, useYamux)
switchA.mount(protoBCA)
switchB.mount(protoCAB)

View File

@ -46,7 +46,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -86,7 +86,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string with custom matcher":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -131,7 +131,7 @@ suite "Switch":
asyncTest "e2e should not leak bufferstreams and connections on channel close":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -171,7 +171,7 @@ suite "Switch":
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e use connect then dial":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -305,7 +305,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@ -357,7 +357,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@ -409,7 +409,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@ -460,7 +460,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@ -521,7 +521,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@ -581,7 +581,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
await onConnect
@ -619,7 +619,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId2: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId2: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
if conns == 5:
@ -662,7 +662,7 @@ suite "Switch":
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.closeWithEOF()
@ -686,7 +686,7 @@ suite "Switch":
switch.stop())
asyncTest "e2e calling closeWithEOF on the same stream should not assert":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
discard await conn.readLp(100)
let testProto = new TestProto
@ -832,7 +832,7 @@ suite "Switch":
asyncTest "e2e peer store":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -882,7 +882,7 @@ suite "Switch":
# this randomly locks the Windows CI job
skip()
return
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@ -1019,7 +1019,7 @@ suite "Switch":
await srcTcpSwitch.stop()
asyncTest "mount unstarted protocol":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
check "test123" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test456")
await conn.close()

View File

@ -30,7 +30,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.write("Hello!")
await conn.close()
@ -52,7 +52,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var msg = newSeq[byte](6)
let conn = await transport.accept()
await conn.readExactly(addr msg[0], 6)
@ -73,7 +73,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
transp: StreamTransport) {.async.} =
var wstream = newAsyncStreamWriter(transp)
await wstream.write("Hello!")
await wstream.finish()
@ -106,7 +106,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
transp: StreamTransport) {.async.} =
var rstream = newAsyncStreamReader(transp)
let msg = await rstream.read(6)
check string.fromBytes(msg) == "Hello!"
@ -179,7 +179,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade(), connectionsTimeout=1.milliseconds)
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.join()

View File

@ -56,7 +56,7 @@ suite "Tor transport":
check string.fromBytes(resp) == "server"
await client.stop()
proc serverAcceptHandler() {.async, gcsafe.} =
proc serverAcceptHandler() {.async.} =
let conn = await server.accept()
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
@ -87,7 +87,7 @@ suite "Tor transport":
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)

View File

@ -89,7 +89,7 @@ suite "WebSocket transport":
const correctPattern = mapAnd(TCP, mapEq("wss"))
await transport1.start(ma)
check correctPattern.match(transport1.addrs[0])
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
if not isNil(conn):

View File

@ -22,11 +22,12 @@ suite "Yamux":
teardown:
checkTrackers()
template mSetup {.inject.} =
template mSetup(ws: int = YamuxDefaultWindowSize) {.inject.} =
# TODO: kept in a template to avoid a threadvar
let
(conna {.inject.}, connb {.inject.}) = bridgedConnections()
(yamuxa {.inject.}, yamuxb {.inject.}) = (Yamux.new(conna), Yamux.new(connb))
yamuxa {.inject.} = Yamux.new(conna, windowSize = ws)
yamuxb {.inject.} = Yamux.new(connb, windowSize = ws)
(handlera, handlerb) = (yamuxa.handle(), yamuxb.handle())
defer:
@ -179,6 +180,63 @@ suite "Yamux":
writerBlocker.complete()
await streamA.close()
asyncTest "Increase window size":
mSetup(512000)
let readerBlocker = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
await readerBlocker
var buffer: array[260000, byte]
discard await conn.readOnce(addr buffer[0], 260000)
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
await wait(streamA.write(newSeq[byte](512000)), 1.seconds) # shouldn't block
let secondWriter = streamA.write(newSeq[byte](10000))
await sleepAsync(10.milliseconds)
check: not secondWriter.finished()
readerBlocker.complete()
await wait(secondWriter, 1.seconds)
await streamA.close()
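
The numbers in this test follow yamux flow-control credit accounting; a sketch, assuming the configured window is the sender's full initial credit:

# configured window:       512000 -> first write of 512000 fills it exactly, no block
# second write (10000):    blocks, credit exhausted
# reader consumes 260000   -> a window update restores that much credit
# second write completes:  10000 <= restored credit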
asyncTest "Reduce window size":
mSetup(64000)
let readerBlocker1 = newFuture[void]()
let readerBlocker2 = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
await readerBlocker1
var buffer: array[256000, byte]
# For the first roundtrip, the send window size is assumed to be 256k
discard await conn.readOnce(addr buffer[0], 256000)
await readerBlocker2
discard await conn.readOnce(addr buffer[0], 40000)
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
await wait(streamA.write(newSeq[byte](256000)), 1.seconds) # shouldn't block
let secondWriter = streamA.write(newSeq[byte](64000))
await sleepAsync(10.milliseconds)
check: not secondWriter.finished()
readerBlocker1.complete()
await wait(secondWriter, 1.seconds)
let thirdWriter = streamA.write(newSeq[byte](10))
await sleepAsync(10.milliseconds)
check: not thirdWriter.finished()
readerBlocker2.complete()
await wait(thirdWriter, 1.seconds)
await streamA.close()
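
This test leans on the mismatch between the assumed initial window (256000 here, per the comment above) and the locally configured 64000; a sketch of the credit accounting:

# assumed initial credit:  256000 -> first write of 256000 completes
# second write (64000):    blocks, no credit left
# reader consumes 256000   -> updates now reflect the configured 64000 window
# second write completes:  64000 granted, then exhausted again
# third write (10):        blocks until the reader frees another 40000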
suite "Exception testing":
asyncTest "Local & Remote close":
mSetup()