mirror of https://github.com/vacp2p/nim-libp2p.git
synced 2025-03-01 08:30:29 +00:00

commit 65d57c0aff
Merge branch 'unstable' into dev/etan/zz-dbg

.github/workflows/daily.yml (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
+name: Daily
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['version-1-6','version-2-0']"
+      cpu: "['amd64']"

.github/workflows/daily_common.yml (vendored, new file, 84 lines)
@@ -0,0 +1,84 @@
+name: daily-common
+
+on:
+  workflow_call:
+    inputs:
+      nim-branch:
+        description: 'Nim branch'
+        required: true
+        type: string
+      cpu:
+        description: 'CPU'
+        required: true
+        type: string
+      exclude:
+        description: 'Exclude matrix configurations'
+        required: false
+        type: string
+        default: "[]"
+
+jobs:
+  delete-cache:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: snnaplab/delete-branch-cache-action@v1
+
+  build:
+    needs: delete-cache
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        platform:
+          - os: linux
+            builder: ubuntu-20
+            shell: bash
+          - os: macos
+            builder: macos-12
+            shell: bash
+          - os: windows
+            builder: windows-2019
+            shell: msys2 {0}
+        branch: ${{ fromJSON(inputs.nim-branch) }}
+        cpu: ${{ fromJSON(inputs.cpu) }}
+        exclude: ${{ fromJSON(inputs.exclude) }}
+
+    defaults:
+      run:
+        shell: ${{ matrix.platform.shell }}
+
+    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
+    runs-on: ${{ matrix.platform.builder }}
+    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Nim
+        uses: "./.github/actions/install_nim"
+        with:
+          os: ${{ matrix.platform.os }}
+          shell: ${{ matrix.platform.shell }}
+          nim_branch: ${{ matrix.branch }}
+          cpu: ${{ matrix.cpu }}
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '~1.15.5'
+          cache: false
+
+      - name: Install p2pd
+        run: |
+          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
+
+      - name: Run tests
+        run: |
+          nim --version
+          nimble --version
+          nimble install -y --depsOnly
+          NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
+          if [[ "${{ matrix.branch }}" == "devel" ]]; then
+            echo -e "\nTesting with '--mm:orc':\n"
+            NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
+          fi

.github/workflows/daily_i386.yml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
+name: Daily i386
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['version-1-6','version-2-0', 'devel']"
+      cpu: "['i386']"
+      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"

.github/workflows/daily_nim_devel.yml (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
+name: Daily Nim Devel
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['devel']"
+      cpu: "['amd64']"

.github/workflows/interop.yml (vendored, 14 changed lines)
@@ -52,3 +52,17 @@ jobs:
         with:
           test-filter: nim-libp2p-head
           extra-versions: ${{ github.workspace }}/test_head.json
+
+  run-hole-punching-interop:
+    name: Run hole-punching interoperability tests
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - uses: docker/setup-buildx-action@v3
+      - name: Build image
+        run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
+      - name: Run tests
+        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
+        with:
+          test-filter: nim-libp2p-head
+          extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json

.github/workflows/multi_nim.yml (vendored, deleted, 82 lines)
@@ -1,82 +0,0 @@
-name: Daily
-on:
-  schedule:
-    - cron: "30 6 * * *"
-  workflow_dispatch:
-
-jobs:
-  delete-cache:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: snnaplab/delete-branch-cache-action@v1
-
-  build:
-    needs: delete-cache
-    timeout-minutes: 120
-    strategy:
-      fail-fast: false
-      matrix:
-        target:
-          - os: linux
-            cpu: amd64
-          - os: linux
-            cpu: i386
-          - os: macos
-            cpu: amd64
-          - os: windows
-            cpu: amd64
-          #- os: windows
-            #cpu: i386
-        branch: [version-1-6, version-2-0, devel]
-        include:
-          - target:
-              os: linux
-            builder: ubuntu-20.04
-            shell: bash
-          - target:
-              os: macos
-            builder: macos-12
-            shell: bash
-          - target:
-              os: windows
-            builder: windows-2019
-            shell: msys2 {0}
-
-    defaults:
-      run:
-        shell: ${{ matrix.shell }}
-
-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
-    runs-on: ${{ matrix.builder }}
-    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Setup Nim
-        uses: "./.github/actions/install_nim"
-        with:
-          os: ${{ matrix.target.os }}
-          shell: ${{ matrix.shell }}
-          nim_branch: ${{ matrix.branch }}
-          cpu: ${{ matrix.target.cpu }}
-
-      - name: Setup Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: '~1.15.5'
-
-      - name: Install p2pd
-        run: |
-          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
-
-      - name: Run tests
-        run: |
-          nim --version
-          nimble --version
-          nimble install -y --depsOnly
-          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
-          if [[ "${{ matrix.branch }}" == "devel" ]]; then
-            echo -e "\nTesting with '--gc:orc':\n"
-            NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
-          fi
@@ -13,7 +13,7 @@ type
 proc new(T: typedesc[TestProto]): T =
 
   # every incoming connection will be handled in this closure
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
     await conn.writeLp("Roger p2p!")
 
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
 ##
 # The actual application
 ##
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng() # Single random number source for the whole application
     # port 0 will take a random available port
@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
 ##
 ##
 ## Let's now start to create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng()
     localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
 
 proc new(T: typedesc[TestProto]): T =
   # every incoming connection will be handled in this closure
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     # Read up to 1024 bytes from this connection, and transform them into
     # a string
     echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
 ## Again, pretty straight-forward, we just send a message on the connection.
 ##
 ## We can now create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng()
     testProto = TestProto.new()
@@ -108,7 +108,7 @@ type
 
 proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
   var res: MetricProto
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     let
       metrics = await res.metricGetter()
       asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
   return MetricList.decode(protobuf).tryGet()
 
 ## We can now create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let rng = newRng()
   proc randomMetricGenerator: Future[MetricList] {.async.} =
     let metricCount = rng[].generate(uint32) mod 16
@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
 const DumbCodec = "/dumb/proto/1.0.0"
 type DumbProto = ref object of LPProtocol
 proc new(T: typedesc[DumbProto], nodeNumber: int): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
     await conn.close()
   return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
 ## (rendezvous in this case) as a bootnode. For this example, we'll
 ## create a bootnode, and then every peer will advertise itself on the
 ## bootnode, and use it to find other peers
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let bootNode = createSwitch()
   await bootNode.start()
 
@@ -143,7 +143,7 @@ proc draw(g: Game) =
 ## peer know that we are available, check that he is also available,
 ## and launch the game.
 proc new(T: typedesc[GameProto], g: Game): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     defer: await conn.closeWithEof()
     if g.peerFound.finished or g.hasCandidate:
       await conn.close()
@@ -17,7 +17,7 @@ requires "nim >= 1.6.0",
          "secp256k1",
          "stew#head",
          "websock",
-         "unittest2 >= 0.0.5 & <= 0.1.0"
+         "unittest2"
 
 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
 let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,7 +25,7 @@ import
   muxers/[muxer, mplex/mplex, yamux/yamux],
   protocols/[identify, secure/secure, secure/noise, rendezvous],
   protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
-  connmanager, upgrademngrs/muxedupgrade,
+  connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
   nameresolving/nameresolver,
   errors, utility
 
@@ -59,6 +59,7 @@ type
     circuitRelay: Relay
     rdv: RendezVous
     services: seq[Service]
+    observedAddrManager: ObservedAddrManager
 
 proc new*(T: type[SwitchBuilder]): T {.public.} =
   ## Creates a SwitchBuilder
@@ -121,8 +122,8 @@ proc withMplex*(
   b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
   b
 
-proc withYamux*(b: SwitchBuilder): SwitchBuilder =
-  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
+proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
+  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)
 
   assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
   b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
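
With this change the builder threads a configurable window size through to Yamux. A minimal usage sketch (assuming the rest of the public SwitchBuilder API is as elsewhere in this repository; the 512_000 value is only illustrative):

import libp2p

# Sketch: build a switch whose Yamux streams start with a 512k
# receive window instead of the 256k YamuxDefaultWindowSize.
let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withYamux(windowSize = 512_000)
  .withNoise()
  .build()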
@@ -201,6 +202,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
   b.services = services
   b
 
+proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
+  b.observedAddrManager = observedAddrManager
+  b
+
 proc build*(b: SwitchBuilder): Switch
   {.raises: [LPError], public.} =
 
@@ -223,8 +228,13 @@ proc build*(b: SwitchBuilder): Switch
     protoVersion = b.protoVersion,
     agentVersion = b.agentVersion)
 
+  let identify =
+    if b.observedAddrManager != nil:
+      Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
+    else:
+      Identify.new(peerInfo, b.sendSignedPeerRecord)
+
   let
-    identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
     connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
     ms = MultistreamSelect.new()
    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
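
Callers that want identify to report addresses from a shared manager can now inject one before build(). A hedged sketch (ObservedAddrManager.new() and its defaults are an assumption, not shown in this diff):

import libp2p, libp2p/observedaddrmanager

let oam = ObservedAddrManager.new()  # hypothetical default construction
let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withMplex()
  .withNoise()
  .withObservedAddrManager(oam)  # new in this diff; a default is used when unset
  .build()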
@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
 
 proc triggerConnEvent*(c: ConnManager,
                        peerId: PeerId,
-                       event: ConnEvent) {.async, gcsafe.} =
+                       event: ConnEvent) {.async.} =
   try:
     trace "About to trigger connection events", peer = peerId
     if c.connEvents[event.kind].len() > 0:
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
 
 proc triggerPeerEvents*(c: ConnManager,
                         peerId: PeerId,
-                        event: PeerEvent) {.async, gcsafe.} =
+                        event: PeerEvent) {.async.} =
 
   trace "About to trigger peer events", peer = peerId
   if c.peerEvents[event.kind].len == 0:
@@ -379,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
   cs.trackConnection(mux.connection)
 
 proc getStream*(c: ConnManager,
-                muxer: Muxer): Future[Connection] {.async, gcsafe.} =
+                muxer: Muxer): Future[Connection] {.async.} =
   ## get a muxed stream for the passed muxer
   ##
 
@@ -387,7 +387,7 @@ proc getStream*(c: ConnManager,
   return await muxer.newStream()
 
 proc getStream*(c: ConnManager,
-                peerId: PeerId): Future[Connection] {.async, gcsafe.} =
+                peerId: PeerId): Future[Connection] {.async.} =
   ## get a muxed stream for the passed peer from any connection
   ##
 
@@ -395,7 +395,7 @@ proc getStream*(c: ConnManager,
 
 proc getStream*(c: ConnManager,
                 peerId: PeerId,
-                dir: Direction): Future[Connection] {.async, gcsafe.} =
+                dir: Direction): Future[Connection] {.async.} =
   ## get a muxed stream for the passed peer from a connection with `dir`
   ##
 
@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
     closeSocket(sock)
 
 # This is a forward declaration needed for newDaemonApi()
-proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
+proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
 
 proc copyEnv(): StringTableRef =
   ## This procedure copies all environment variables into a StringTable.
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
 
   # Starting daemon process
   # echo "Starting ", cmd, " ", args.join(" ")
   api.process =
     exceptionToAssert:
       startProcess(cmd, "", args, env, {poParentStreams})
   # Waiting until daemon will not be bound to control socket.
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
     var value: seq[byte]
     if pbDhtResponse.getRequiredField(3, value).isErr():
       raise newException(DaemonLocalError, "Missing required DHT field `value`!")
 
     return initProtoBuffer(value)
   else:
     raise newException(DaemonLocalError, "Wrong message type!")
@@ -26,7 +26,7 @@ method connect*(
   addrs: seq[MultiAddress],
   forceDial = false,
   reuseConnection = true,
-  upgradeDir = Direction.Out) {.async, base.} =
+  dir = Direction.Out) {.async, base.} =
   ## connect remote peer without negotiating
   ## a protocol
   ##
@@ -53,7 +53,7 @@ proc dialAndUpgrade(
   peerId: Opt[PeerId],
   hostname: string,
   address: MultiAddress,
-  upgradeDir = Direction.Out):
+  dir = Direction.Out):
     Future[Muxer] {.async.} =
 
   for transport in self.transports: # for each transport
@@ -75,15 +75,19 @@ proc dialAndUpgrade(
 
       let mux =
         try:
-          dialed.transportDir = upgradeDir
-          await transport.upgrade(dialed, upgradeDir, peerId)
+          # This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
+          # an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
+          # The if below is more general and might handle other use cases in the future.
+          if dialed.dir != dir:
+            dialed.dir = dir
+          await transport.upgrade(dialed, peerId)
         except CatchableError as exc:
           # If we failed to establish the connection through one transport,
           # we won't succeed through another - no use in trying again
           await dialed.close()
           debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
           if exc isnot CancelledError:
-            if upgradeDir == Direction.Out:
+            if dialed.dir == Direction.Out:
              libp2p_failed_upgrades_outgoing.inc()
            else:
              libp2p_failed_upgrades_incoming.inc()
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
             # Try other address
             return nil
 
-      doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
+      doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
       debug "Dial successful", peerId = mux.connection.peerId
       return mux
   return nil
@@ -128,7 +132,7 @@ proc dialAndUpgrade(
   self: Dialer,
   peerId: Opt[PeerId],
   addrs: seq[MultiAddress],
-  upgradeDir = Direction.Out):
+  dir = Direction.Out):
     Future[Muxer] {.async.} =
 
   debug "Dialing peer", peerId = peerId.get(default(PeerId))
@@ -146,7 +150,7 @@ proc dialAndUpgrade(
       else: await self.nameResolver.resolveMAddress(expandedAddress)
 
     for resolvedAddress in resolvedAddresses:
-      result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
+      result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
       if not isNil(result):
         return result
 
@@ -164,7 +168,7 @@ proc internalConnect(
   addrs: seq[MultiAddress],
   forceDial: bool,
   reuseConnection = true,
-  upgradeDir = Direction.Out):
+  dir = Direction.Out):
     Future[Muxer] {.async.} =
   if Opt.some(self.localPeerId) == peerId:
     raise newException(CatchableError, "can't dial self!")
@@ -182,7 +186,7 @@ proc internalConnect(
   let slot = self.connManager.getOutgoingSlot(forceDial)
   let muxed =
     try:
-      await self.dialAndUpgrade(peerId, addrs, upgradeDir)
+      await self.dialAndUpgrade(peerId, addrs, dir)
     except CatchableError as exc:
       slot.release()
       raise exc
@@ -209,7 +213,7 @@ method connect*(
   addrs: seq[MultiAddress],
   forceDial = false,
   reuseConnection = true,
-  upgradeDir = Direction.Out) {.async.} =
+  dir = Direction.Out) {.async.} =
   ## connect remote peer without negotiating
   ## a protocol
   ##
@@ -217,7 +221,7 @@ method connect*(
   if self.connManager.connCount(peerId) > 0 and reuseConnection:
     return
 
-  discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
+  discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
 
 method connect*(
   self: Dialer,
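
The renamed dir parameter is what the DCUtR code later in this diff uses to force the logical direction of a hole-punch dial. A sketch of such a call inside an async proc (assuming switch, peerId and addrs are in scope):

# Mark an outgoing hole-punch dial as logically inbound, as the
# DCUtR initiator does later in this diff:
await switch.connect(peerId, addrs,
                     forceDial = true,
                     reuseConnection = false,
                     dir = Direction.In)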
@@ -398,6 +398,9 @@ const
     MAProtocol(
       mcodec: multiCodec("quic"), kind: Marker, size: 0
     ),
+    MAProtocol(
+      mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
+    ),
     MAProtocol(
       mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
       coder: TranscoderIP6Zone
@@ -955,7 +958,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
   ## Initialize empty MultiAddress.
   result.data = initVBuffer()
 
-proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
+proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
            protocol: IpTransportProtocol, port: Port): MultiAddress =
   var res: MultiAddress
   res.data = initVBuffer()
@@ -193,6 +193,7 @@ const MultiCodecList = [
     ("https", 0x01BB),
     ("tls", 0x01C0),
     ("quic", 0x01CC),
+    ("quic-v1", 0x01CD),
     ("ws", 0x01DD),
     ("wss", 0x01DE),
     ("p2p-websocket-star", 0x01DF), # not in multicodec list
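
With the codec registered, QUIC v1 multiaddresses should parse; a minimal sketch (the address is purely illustrative):

import libp2p/multiaddress

# "quic-v1" is the 0x01CD marker protocol added above
let ma = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
assert $ma == "/ip4/127.0.0.1/udp/4242/quic-v1"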
@@ -131,7 +131,7 @@ proc handle*(
     protos: seq[string],
     matchers = newSeq[Matcher](),
     active: bool = false,
-    ): Future[string] {.async, gcsafe.} =
+    ): Future[string] {.async.} =
   trace "Starting multistream negotiation", conn, handshaked = active
   var handshaked = active
   while not conn.atEof:
@@ -172,10 +172,9 @@ proc handle*(
       trace "no handlers", conn, protocol = ms
       await conn.writeLp(Na)
 
-proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
+proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
   trace "Starting multistream handler", conn, handshaked = active
   var
-    handshaked = active
     protos: seq[string]
     matchers: seq[Matcher]
   for h in m.handlers:
@@ -42,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
 proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
   newException(InvalidMplexMsgType, "invalid message type")
 
-proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
+proc readMsg*(conn: Connection): Future[Msg] {.async.} =
   let header = await conn.readVarint()
   trace "read header varint", varint = header, conn
 
@@ -73,7 +73,7 @@ func shortLog*(s: LPChannel): auto =
 
 chronicles.formatIt(LPChannel): shortLog(it)
 
-proc open*(s: LPChannel) {.async, gcsafe.} =
+proc open*(s: LPChannel) {.async.} =
   trace "Opening channel", s, conn = s.conn
   if s.conn.isClosed:
     return
@@ -95,7 +95,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
   if s.closedLocal and s.atEof():
     await procCall BufferStream(s).close()
 
-proc reset*(s: LPChannel) {.async, gcsafe.} =
+proc reset*(s: LPChannel) {.async.} =
   if s.isClosed:
     trace "Already closed", s
     return
@@ -123,7 +123,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
 
   trace "Channel reset", s
 
-method close*(s: LPChannel) {.async, gcsafe.} =
+method close*(s: LPChannel) {.async.} =
   ## Close channel for writing - a message will be sent to the other peer
   ## informing them that the channel is closed and that we're waiting for
   ## their acknowledgement.
@@ -122,7 +122,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
     trace "Exception in mplex stream handler", m, chann, msg = exc.msg
     await chann.reset()
 
-method handle*(m: Mplex) {.async, gcsafe.} =
+method handle*(m: Mplex) {.async.} =
   trace "Starting mplex handler", m
   try:
     while not m.connection.atEof:
@@ -211,7 +211,7 @@ proc new*(M: type Mplex,
 
 method newStream*(m: Mplex,
                   name: string = "",
-                  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
+                  lazy: bool = false): Future[Connection] {.async.} =
   let channel = m.newStreamInternal(timeout = m.inChannTimeout)
 
   if not lazy:
@@ -219,7 +219,7 @@ method newStream*(m: Mplex,
 
   return Connection(channel)
 
-method close*(m: Mplex) {.async, gcsafe.} =
+method close*(m: Mplex) {.async.} =
   if m.isClosed:
     trace "Already closed", m
     return
@@ -46,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
 
 # muxer interface
 method newStream*(m: Muxer, name: string = "", lazy: bool = false):
-  Future[Connection] {.base, async, gcsafe.} = discard
-method close*(m: Muxer) {.base, async, gcsafe.} =
+  Future[Connection] {.base, async.} = discard
+method close*(m: Muxer) {.base, async.} =
   if not isNil(m.connection):
     await m.connection.close()
-method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
+method handle*(m: Muxer): Future[void] {.base, async.} = discard
 
 proc new*(
   T: typedesc[MuxerProvider],
@@ -22,7 +22,8 @@ logScope:
 const
   YamuxCodec* = "/yamux/1.0.0"
   YamuxVersion = 0.uint8
-  DefaultWindowSize = 256000
+  YamuxDefaultWindowSize* = 256000
+  MaxSendQueueSize = 256000
   MaxChannelCount = 200
 
 when defined(libp2p_yamux_metrics):
@@ -59,7 +60,7 @@ type
     streamId: uint32
     length: uint32
 
-proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
+proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
   var buffer: array[12, byte]
   await conn.readExactly(addr buffer[0], 12)
 
@@ -143,6 +144,7 @@ type
     recvWindow: int
     sendWindow: int
     maxRecvWindow: int
+    maxSendQueueSize: int
     conn: Connection
     isSrc: bool
    opened: bool
@@ -169,9 +171,14 @@ proc `$`(channel: YamuxChannel): string =
   if s.len > 0:
     result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
 
-proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
-  for (elem, sent, _) in channel.sendQueue:
-    result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
+proc lengthSendQueue(channel: YamuxChannel): int =
+  channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
+
+proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
+  # For leniency, limit big messages size to the third of maxSendQueueSize
+  # This value is arbitrary, it's not in the specs
+  # It permits to store up to 3 big messages if the peer is stalling.
+  channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
 
 proc actuallyClose(channel: YamuxChannel) {.async.} =
   if channel.closedLocally and channel.sendQueue.len == 0 and
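
The two accounting procs differ only in the per-message cap. A self-contained sketch of the same arithmetic (types simplified; values illustrative):

import std/sequtils

type QueuedMsg = tuple[data: seq[byte], sent: int]

proc lengthSendQueue(queue: seq[QueuedMsg]): int =
  # total bytes still waiting to be sent
  queue.foldl(a + b.data.len - b.sent, 0)

proc lengthSendQueueWithLimit(queue: seq[QueuedMsg], maxSendQueueSize: int): int =
  # each message counts for at most a third of the budget, so up to
  # three oversized messages can sit in the queue while the peer stalls
  queue.foldl(a + min(b.data.len - b.sent, maxSendQueueSize div 3), 0)

let q: seq[QueuedMsg] = @[(data: newSeq[byte](300_000), sent: 0)]
assert lengthSendQueue(q) == 300_000
assert lengthSendQueueWithLimit(q, 256_000) == 256_000 div 3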
@@ -183,9 +190,10 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
     channel.closedRemotely.complete()
     await channel.actuallyClose()
 
-method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
+method closeImpl*(channel: YamuxChannel) {.async.} =
   if not channel.closedLocally:
     channel.closedLocally = true
+    channel.isEof = true
 
     if channel.isReset == false and channel.sendQueue.len == 0:
       await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
@@ -249,6 +257,7 @@ method readOnce*(
     await channel.closedRemotely or channel.receivedData.wait()
     if channel.closedRemotely.done() and channel.recvQueue.len == 0:
       channel.returnedEof = true
+      channel.isEof = true
       return 0
 
   let toRead = min(channel.recvQueue.len, nbytes)
@@ -282,9 +291,9 @@ proc trySend(channel: YamuxChannel) {.async.} =
     channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
     if channel.sendWindow == 0:
       trace "send window empty"
-      if channel.sendQueueBytes(true) > channel.maxRecvWindow:
-        debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
-          currentQueueSize = channel.sendQueueBytes(true)
+      if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
+        debug "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
+          currentQueueSize = channel.lengthSendQueueWithLimit()
         try:
           await channel.reset(true)
         except CatchableError as exc:
@@ -292,7 +301,7 @@ proc trySend(channel: YamuxChannel) {.async.} =
         break
 
     let
-      bytesAvailable = channel.sendQueueBytes()
+      bytesAvailable = channel.lengthSendQueue()
      toSend = min(channel.sendWindow, bytesAvailable)
    var
      sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -343,15 +352,18 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
     return result
   channel.sendQueue.add((msg, 0, result))
   when defined(libp2p_yamux_metrics):
-    libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
+    libp2p_yamux_recv_queue.observe(channel.lengthSendQueue().int64)
   asyncSpawn channel.trySend()
 
-proc open*(channel: YamuxChannel) {.async, gcsafe.} =
+proc open*(channel: YamuxChannel) {.async.} =
   if channel.opened:
     trace "Try to open channel twice"
     return
   channel.opened = true
-  await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
+  await channel.conn.write(YamuxHeader.windowUpdate(
+    channel.id,
+    uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
+    {if channel.isSrc: Syn else: Ack}))
 
 method getWrapped*(channel: YamuxChannel): Connection = channel.conn
 
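
The windowUpdate sent on open advertises only the surplus above the 256k initial window that both sides implicitly assume. A worked example of that computation (standalone sketch; values illustrative):

const YamuxDefaultWindowSize = 256_000

proc initialWindowUpdate(maxRecvWindow: int): uint32 =
  # the remote already assumes 256k, so only the surplus is advertised;
  # windows at or below the default advertise 0
  uint32(max(maxRecvWindow - YamuxDefaultWindowSize, 0))

assert initialWindowUpdate(512_000) == 256_000'u32
assert initialWindowUpdate(256_000) == 0'u32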
@@ -362,6 +374,8 @@ type
     currentId: uint32
     isClosed: bool
     maxChannCount: int
+    windowSize: int
+    maxSendQueueSize: int
 
 proc lenBySrc(m: Yamux, isSrc: bool): int =
   for v in m.channels.values():
@@ -375,12 +389,19 @@ proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
   if channel.isReset and channel.recvWindow > 0:
     m.flushed[channel.id] = channel.recvWindow
 
-proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
+proc createStream(m: Yamux, id: uint32, isSrc: bool,
+                  recvWindow: int, maxSendQueueSize: int): YamuxChannel =
+  # As you can see, during initialization, recvWindow can be larger than maxRecvWindow.
+  # This is because the peer we're connected to will always assume
+  # that the initial recvWindow is 256k.
+  # To solve this contradiction, no updateWindow will be sent until recvWindow is less
+  # than maxRecvWindow
   result = YamuxChannel(
     id: id,
-    maxRecvWindow: DefaultWindowSize,
-    recvWindow: DefaultWindowSize,
-    sendWindow: DefaultWindowSize,
+    maxRecvWindow: recvWindow,
+    recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
+    sendWindow: YamuxDefaultWindowSize,
+    maxSendQueueSize: maxSendQueueSize,
     isSrc: isSrc,
     conn: m.connection,
     receivedData: newAsyncEvent(),
@@ -429,7 +450,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
     trace "Exception in yamux stream handler", msg = exc.msg
     await channel.reset()
 
-method handle*(m: Yamux) {.async, gcsafe.} =
+method handle*(m: Yamux) {.async.} =
   trace "Starting yamux handler", pid=m.connection.peerId
   try:
     while not m.connection.atEof:
@@ -454,8 +475,9 @@ method handle*(m: Yamux) {.async, gcsafe.} =
           if header.streamId in m.flushed:
             m.flushed.del(header.streamId)
           if header.streamId mod 2 == m.currentId mod 2:
+            debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
             raise newException(YamuxError, "Peer used our reserved stream id")
-          let newStream = m.createStream(header.streamId, false)
+          let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
           if m.channels.len >= m.maxChannCount:
             await newStream.reset()
             continue
@@ -511,19 +533,24 @@ method getStreams*(m: Yamux): seq[Connection] =
 method newStream*(
   m: Yamux,
   name: string = "",
-  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
+  lazy: bool = false): Future[Connection] {.async.} =
 
   if m.channels.len > m.maxChannCount - 1:
     raise newException(TooManyChannels, "max allowed channel count exceeded")
-  let stream = m.createStream(m.currentId, true)
+  let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
   m.currentId += 2
   if not lazy:
     await stream.open()
   return stream
 
-proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
+proc new*(T: type[Yamux], conn: Connection,
+          maxChannCount: int = MaxChannelCount,
+          windowSize: int = YamuxDefaultWindowSize,
+          maxSendQueueSize: int = MaxSendQueueSize): T =
   T(
     connection: conn,
     currentId: if conn.dir == Out: 1 else: 2,
-    maxChannCount: maxChannCount
+    maxChannCount: maxChannCount,
+    windowSize: windowSize,
+    maxSendQueueSize: maxSendQueueSize
   )
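
For code that constructs the muxer directly, the new knobs mirror the builder's. A hedged sketch (conn is assumed to be an already-established Connection; values illustrative):

# windowSize tunes per-stream flow control; maxSendQueueSize bounds
# buffered-but-unsent bytes before the channel is reset
let muxer = Yamux.new(conn,
                      maxChannCount = 200,
                      windowSize = 512_000,
                      maxSendQueueSize = 256_000)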
|
@ -52,7 +52,7 @@ proc resolveOneAddress(
|
|||||||
ma: MultiAddress,
|
ma: MultiAddress,
|
||||||
domain: Domain = Domain.AF_UNSPEC,
|
domain: Domain = Domain.AF_UNSPEC,
|
||||||
prefix = ""): Future[seq[MultiAddress]]
|
prefix = ""): Future[seq[MultiAddress]]
|
||||||
{.async, raises: [MaError, TransportAddressError].} =
|
{.async.} =
|
||||||
#Resolve a single address
|
#Resolve a single address
|
||||||
var pbuf: array[2, byte]
|
var pbuf: array[2, byte]
|
||||||
|
|
||||||
|
@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
|
|||||||
|
|
||||||
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
|
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
|
||||||
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
|
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
|
||||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||||
try:
|
try:
|
||||||
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
|
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
|
||||||
raise newException(AutonatError, "Received malformed message")
|
raise newException(AutonatError, "Received malformed message")
|
||||||
|
@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
|
|||||||
proc addressMapper(
|
proc addressMapper(
|
||||||
self: AutonatService,
|
self: AutonatService,
|
||||||
peerStore: PeerStore,
|
peerStore: PeerStore,
|
||||||
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||||
|
|
||||||
if self.networkReachability != NetworkReachability.Reachable:
|
if self.networkReachability != NetworkReachability.Reachable:
|
||||||
return listenAddrs
|
return listenAddrs
|
||||||
@ -179,7 +179,7 @@ proc addressMapper(
|
|||||||
return addrs
|
return addrs
|
||||||
|
|
||||||
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
||||||
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||||
return await addressMapper(self, switch.peerStore, listenAddrs)
|
return await addressMapper(self, switch.peerStore, listenAddrs)
|
||||||
|
|
||||||
info "Setting up AutonatService"
|
info "Setting up AutonatService"
|
||||||
|
@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
|
|||||||
|
|
||||||
if peerDialableAddrs.len > self.maxDialableAddrs:
|
if peerDialableAddrs.len > self.maxDialableAddrs:
|
||||||
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
|
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
|
||||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
|
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
|
||||||
try:
|
try:
|
||||||
discard await anyCompleted(futs).wait(self.connectTimeout)
|
discard await anyCompleted(futs).wait(self.connectTimeout)
|
||||||
debug "Dcutr initiator has directly connected to the remote peer."
|
debug "Dcutr initiator has directly connected to the remote peer."
|
||||||
|
@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
|
|||||||
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
|
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
|
||||||
await conn.writeLp(pb.buffer)
|
await conn.writeLp(pb.buffer)
|
||||||
|
|
||||||
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
|
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
|
||||||
addrs.filterIt(TCP.match(it))
|
var result = newSeq[MultiAddress]()
|
||||||
|
for a in addrs:
|
||||||
|
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
|
||||||
|
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
|
||||||
|
result.add(a[0..1].tryGet())
|
||||||
|
return result
|
||||||
|
@ -29,7 +29,7 @@ logScope:
|
|||||||
|
|
||||||
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
|
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
|
||||||
|
|
||||||
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
|
proc handleStream(stream: Connection, proto: string) {.async.} =
|
||||||
var peerDialableAddrs: seq[MultiAddress]
|
var peerDialableAddrs: seq[MultiAddress]
|
||||||
try:
|
try:
|
||||||
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
|
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
|
||||||
@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
|
|||||||
|
|
||||||
if peerDialableAddrs.len > maxDialableAddrs:
|
if peerDialableAddrs.len > maxDialableAddrs:
|
||||||
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
|
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
|
||||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
|
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
|
||||||
try:
|
try:
|
||||||
discard await anyCompleted(futs).wait(connectTimeout)
|
discard await anyCompleted(futs).wait(connectTimeout)
|
||||||
debug "Dcutr receiver has directly connected to the remote peer."
|
debug "Dcutr receiver has directly connected to the remote peer."
|
||||||
|
@ -189,7 +189,7 @@ proc dialPeerV2*(
|
|||||||
conn.limitData = msgRcvFromRelay.limit.data
|
conn.limitData = msgRcvFromRelay.limit.data
|
||||||
return conn
|
return conn
|
||||||
|
|
||||||
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
|
||||||
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
||||||
await sendHopStatus(conn, MalformedMessage)
|
await sendHopStatus(conn, MalformedMessage)
|
||||||
return
|
return
|
||||||
@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
|||||||
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
|
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
|
||||||
await sendStopError(conn, MalformedMessage)
|
await sendStopError(conn, MalformedMessage)
|
||||||
|
|
||||||
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
|
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
|
||||||
let src = msg.srcPeer.valueOr:
|
let src = msg.srcPeer.valueOr:
|
||||||
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
|
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
|
||||||
return
|
return
|
||||||
@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
|
|||||||
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
|
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
|
||||||
else: await conn.close()
|
else: await conn.close()
|
||||||
|
|
||||||
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
|
||||||
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
||||||
await sendStatus(conn, StatusV1.MalformedMessage)
|
await sendStatus(conn, StatusV1.MalformedMessage)
|
||||||
return
|
return
|
||||||
@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
|
|||||||
maxCircuitPerPeer: maxCircuitPerPeer,
|
maxCircuitPerPeer: maxCircuitPerPeer,
|
||||||
msgSize: msgSize,
|
msgSize: msgSize,
|
||||||
isCircuitRelayV1: circuitRelayV1)
|
isCircuitRelayV1: circuitRelayV1)
|
||||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||||
try:
|
try:
|
||||||
case proto:
|
case proto:
|
||||||
of RelayV1Codec: await cl.handleStreamV1(conn)
|
of RelayV1Codec: await cl.handleStreamV1(conn)
|
||||||
|
@@ -47,6 +47,7 @@ proc new*(
 limitDuration: uint32,
 limitData: uint64): T =
 let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
+rc.dir = conn.dir
 rc.initStream()
 if limitDuration > 0:
 proc checkDurationConnection() {.async.} =
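The added `rc.dir = conn.dir` makes a `RelayConnection` report the direction of the connection it wraps, which the later upgrade hunks rely on once the explicit `Direction` parameter is gone. A hedged sketch of the effect (hypothetical `conn`, zero limits):

```nim
# Hedged sketch, not part of the diff: after this change a relayed
# connection inherits the wrapped connection's direction.
let rconn = RelayConnection.new(conn, limitDuration = 0, limitData = 0)
doAssert rconn.dir == conn.dir
```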
@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
 wrappedConn = wrappedConn.getWrapped()
 return false

-proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
+proc handleReserve(r: Relay, conn: Connection) {.async.} =
 if conn.isRelayed():
 trace "reservation attempt over relay connection", pid = conn.peerId
 await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =

 proc handleConnect(r: Relay,
 connSrc: Connection,
-msg: HopMessage) {.async, gcsafe.} =
+msg: HopMessage) {.async.} =
 if connSrc.isRelayed():
 trace "connection attempt over relay connection"
 await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
 await rconnDst.close()
 await bridge(rconnSrc, rconnDst)

-proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
+proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
 let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
 await sendHopStatus(conn, MalformedMessage)
 return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =

 # Relay V1

-proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
+proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
 r.streamCount.inc()
 defer: r.streamCount.dec()
 if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
 trace "relaying connection", src, dst
 await bridge(connSrc, connDst)

-proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
+proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
 let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
 await sendStatus(conn, StatusV1.MalformedMessage)
 return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
 msgSize: msgSize,
 isCircuitRelayV1: circuitRelayV1)

-proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handleStream(conn: Connection, proto: string) {.async.} =
 try:
 case proto:
 of RelayV2HopCodec: await r.handleHopStreamV2(conn)
@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
 self.client.onNewConnection = proc(
 conn: Connection,
 duration: uint32 = 0,
-data: uint64 = 0) {.async, gcsafe, raises: [].} =
+data: uint64 = 0) {.async.} =
 await self.queue.addLast(RelayConnection.new(conn, duration, data))
 await conn.join()
 self.selfRunning = true
 await procCall Transport(self).start(ma)
 trace "Starting Relay transport"

-method stop*(self: RelayTransport) {.async, gcsafe.} =
+method stop*(self: RelayTransport) {.async.} =
 self.running = false
 self.selfRunning = false
 self.client.onNewConnection = nil
 while not self.queue.empty():
 await self.queue.popFirstNoWait().close()

-method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
+method accept*(self: RelayTransport): Future[Connection] {.async.} =
 result = await self.queue.popFirst()

-proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
+proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
 let
 sma = toSeq(ma.items())
 relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
 self: RelayTransport,
 hostname: string,
 ma: MultiAddress,
-peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
+peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
 peerId.withValue(pid):
 let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
 result = await self.dial(address)
@@ -21,14 +21,14 @@ const
 RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
 RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"

-proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
+proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
 trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
 let
 msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
 pb = encode(msg)
 await conn.writeLp(pb.buffer)

-proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
+proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
 trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
 let
 msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
 ../peerid,
 ../crypto/crypto,
 ../multiaddress,
+../multicodec,
 ../protocols/protocol,
 ../utility,
 ../errors,
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
 signedPeerRecord =
 # The SPR contains the same data as the identify message
 # would be cumbersome to log
-if iinfo.signedPeerRecord.isSome(): "Some"
+if it.signedPeerRecord.isSome(): "Some"
 else: "None"

 proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
|
|||||||
if ? pb.getField(6, agentVersion).toOpt():
|
if ? pb.getField(6, agentVersion).toOpt():
|
||||||
iinfo.agentVersion = some(agentVersion)
|
iinfo.agentVersion = some(agentVersion)
|
||||||
|
|
||||||
debug "decodeMsg: decoded identify", iinfo
|
|
||||||
Opt.some(iinfo)
|
Opt.some(iinfo)
|
||||||
|
|
||||||
proc new*(
|
proc new*(
|
||||||
T: typedesc[Identify],
|
T: typedesc[Identify],
|
||||||
peerInfo: PeerInfo,
|
peerInfo: PeerInfo,
|
||||||
sendSignedPeerRecord = false
|
sendSignedPeerRecord = false,
|
||||||
|
observedAddrManager = ObservedAddrManager.new(),
|
||||||
): T =
|
): T =
|
||||||
let identify = T(
|
let identify = T(
|
||||||
peerInfo: peerInfo,
|
peerInfo: peerInfo,
|
||||||
sendSignedPeerRecord: sendSignedPeerRecord,
|
sendSignedPeerRecord: sendSignedPeerRecord,
|
||||||
observedAddrManager: ObservedAddrManager.new(),
|
observedAddrManager: observedAddrManager,
|
||||||
)
|
)
|
||||||
identify.init()
|
identify.init()
|
||||||
identify
|
identify
|
||||||
|
|
||||||
method init*(p: Identify) =
|
method init*(p: Identify) =
|
||||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
proc handle(conn: Connection, proto: string) {.async.} =
|
||||||
try:
|
try:
|
||||||
trace "handling identify request", conn
|
trace "handling identify request", conn
|
||||||
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
|
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
|
||||||
@@ -168,7 +169,7 @@ method init*(p: Identify) =

 proc identify*(self: Identify,
 conn: Connection,
-remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
+remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
 trace "initiating identify", conn
 var message = await conn.readLp(64*1024)
 if len(message) == 0:
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
 raise newException(IdentityInvalidMsgError, "Empty message received!")

 var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+debug "identify: decoded message", conn, info
 let
 pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
 peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
 info.peerId = peer

 info.observedAddr.withValue(observed):
-if not self.observedAddrManager.addObservation(observed):
-debug "Observed address is not valid", observedAddr = observed
+# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
+# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
+if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
+trace "Not adding address to ObservedAddrManager.", observed
+elif not self.observedAddrManager.addObservation(observed):
+trace "Observed address is not valid.", observedAddr = observed
 return info

 proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
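The rewritten branch skips observations that can never be dialable NAT addresses. A hedged sketch of what the new check accepts and rejects, reusing the same `contains`/`multiCodec` calls as the diff (the example addresses are hypothetical):

```nim
# Hedged sketch: relayed observations are filtered out, direct ones pass.
let relayed = MultiAddress.init("/ip4/192.0.2.1/tcp/4001/p2p-circuit").tryGet()
let direct = MultiAddress.init("/ip4/192.0.2.1/tcp/4001").tryGet()
doAssert relayed.contains(multiCodec("p2p-circuit")).get(false)     # skipped
doAssert not direct.contains(multiCodec("p2p-circuit")).get(false)  # observed
```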
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
 identifypush

 proc init*(p: IdentifyPush) =
-proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+proc handle(conn: Connection, proto: string) {.async.} =
 trace "handling identify push", conn
 try:
 var message = await conn.readLp(64*1024)

 var identInfo = decodeMsg(message).valueOr:
 raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+debug "identify push: decoded message", conn, identInfo

 identInfo.pubkey.withValue(pubkey):
 let receivedPeerId = PeerId.init(pubkey).tryGet()
@@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol

 proc new*(T: typedesc[Perf]): T {.public.} =
 var p = T()
-proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+proc handle(conn: Connection, proto: string) {.async.} =
 var bytesRead = 0
 try:
 trace "Received benchmark performance check", conn
@@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
 ping

 method init*(p: Ping) =
-proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+proc handle(conn: Connection, proto: string) {.async.} =
 try:
 trace "handling ping", conn
 var buf: array[PingSize, byte]
@@ -71,7 +71,7 @@ method init*(p: Ping) =
 proc ping*(
 p: Ping,
 conn: Connection,
-): Future[Duration] {.async, gcsafe, public.} =
+): Future[Duration] {.async, public.} =
 ## Sends ping to `conn`, returns the delay

 trace "initiating ping", conn
@@ -384,7 +384,7 @@ proc validateAndRelay(g: GossipSub,
 proc dataAndTopicsIdSize(msgs: seq[Message]): int =
 msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)

-proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
+proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
 # In this way we count even ignored fields by protobuf

 var rmsg = rpcMsgOpt.valueOr:
@@ -234,7 +234,7 @@ template sendMetrics(msg: RPCMsg): untyped =
 # metrics
 libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])

-proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
+proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.async.} =
 doAssert(not isNil(p), "pubsubpeer nil!")

 if msg.len <= 0:
@@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
 sema: newAsyncSemaphore(SemaphoreDefaultSize)
 )
 logScope: topics = "libp2p discovery rendezvous"
-proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handleStream(conn: Connection, proto: string) {.async.} =
 try:
 let
 buf = await conn.readLp(4096)
@@ -19,7 +19,7 @@ type

 method init(p: PlainText) {.gcsafe.} =
 proc handle(conn: Connection, proto: string)
-{.async, gcsafe.} = discard
+{.async.} = discard
 ## plain text doesn't do anything

 p.codec = PlainTextCodec
@@ -135,10 +135,9 @@ method init*(s: Secure) =

 method secure*(s: Secure,
 conn: Connection,
-initiator: bool,
 peerId: Opt[PeerId]):
 Future[Connection] {.base.} =
-s.handleConn(conn, initiator, peerId)
+s.handleConn(conn, conn.dir == Direction.Out, peerId)

 method readOnce*(s: SecureConn,
 pbytes: pointer,
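With `initiator: bool` removed, the handshake role is derived from the connection itself: dialed connections initiate, accepted ones respond. A hedged before/after sketch of a call site (hypothetical `sec`, `conn`, `peerId`):

```nim
# Hedged sketch: the old explicit flag vs. the new derived role.
# Old: let sconn = await sec.secure(conn, initiator = true, peerId)
let sconn = await sec.secure(conn, peerId)
# Internally this now resolves to:
#   s.handleConn(conn, conn.dir == Direction.Out, peerId)
```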
@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =

 proc addressMapper(
 self: AutoRelayService,
-listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
 return concat(toSeq(self.relayAddresses.values))

 proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
 self.onReservation(concat(toSeq(self.relayAddresses.values)))
 await sleepAsync chronos.seconds(ttl - 30)

-method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
-self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
+self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
 return await addressMapper(self, listenAddrs)

 let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
 self.backingOff.keepItIf(it != pid)
 self.peerAvailable.fire()

-proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
+proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
 while true:
 # Remove relayPeers that failed
 let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
 await self.peerAvailable.wait()
 await sleepAsync(200.millis)

-method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
+method run*(self: AutoRelayService, switch: Switch) {.async.} =
 if self.running:
 trace "Autorelay is already running"
 return
 self.running = true
 self.runner = self.innerRun(switch)

-method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
+method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
 let hasBeenStopped = await procCall Service(self).stop(switch)
 if hasBeenStopped:
 self.running = false
@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =

 switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)

-self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
 if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
 discard await self.autoRelayService.setup(switch)
 elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
@@ -50,7 +50,7 @@ method initStream*(s: ChronosStream) =
 if s.objName.len == 0:
 s.objName = ChronosStreamTrackerName

-s.timeoutHandler = proc() {.async, gcsafe.} =
+s.timeoutHandler = proc() {.async.} =
 trace "Idle timeout expired, closing ChronosStream", s
 await s.close()

@@ -41,7 +41,7 @@ type
 when defined(libp2p_agents_metrics):
 shortAgent*: string

-proc timeoutMonitor(s: Connection) {.async, gcsafe.}
+proc timeoutMonitor(s: Connection) {.async.}

 func shortLog*(conn: Connection): string =
 try:
@@ -110,7 +110,7 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =

 return false

-proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
+proc timeoutMonitor(s: Connection) {.async.} =
 ## monitor the channel for inactivity
 ##
 ## if the timeout was hit, it means that
@@ -246,7 +246,7 @@ proc readLine*(s: LPStream,
 if len(result) == lim:
 break

-proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
+proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
 var
 buffer: array[10, byte]

@@ -264,7 +264,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
 if true: # can't end with a raise apparently
 raise (ref InvalidVarintError)(msg: "Cannot parse varint")

-proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
+proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
 ## read length prefixed msg, with the length encoded as a varint
 let
 length = await s.readVarint()
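`readLp` composes with `readVarint`: it reads one varint length prefix, checks it against `maxSize`, and returns that many bytes. A hedged usage sketch (hypothetical stream `conn` and a 4 KiB cap):

```nim
# Hedged sketch: receive one length-prefixed frame of at most 4096 bytes.
let frame = await conn.readLp(4096)
trace "received frame", len = frame.len
```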
@@ -71,17 +71,17 @@ type
 inUse: bool


-method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
+method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
 if self.inUse:
 warn "service setup has already been called"
 return false
 self.inUse = true
 return true

-method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
+method run*(self: Service, switch: Switch) {.base, async.} =
 doAssert(false, "Not implemented!")

-method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
+method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
 if not self.inUse:
 warn "service is already stopped"
 return false
@@ -141,10 +141,10 @@ method connect*(
 addrs: seq[MultiAddress],
 forceDial = false,
 reuseConnection = true,
-upgradeDir = Direction.Out): Future[void] {.public.} =
+dir = Direction.Out): Future[void] {.public.} =
 ## Connects to a peer without opening a stream to it

-s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
+s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)

 method connect*(
 s: Switch,
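The rename from `upgradeDir` to `dir` matches the DCUtR call at the top of this diff: the flag now labels the logical direction of the whole connection rather than just the upgrade step. A hedged call-site sketch (hypothetical `peerId` and `ma`):

```nim
# Hedged sketch: force a fresh outbound-tagged connection, as DCUtR does.
await switch.connect(peerId, @[ma],
                     forceDial = true,
                     reuseConnection = false,
                     dir = Direction.Out)
```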
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
 s.peerInfo.protocols.add(proto.codec)

 proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
-let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
+let muxed = await trans.upgrade(conn, Opt.none(PeerId))
 switch.connManager.storeMuxer(muxed)
 await switch.peerStore.identify(muxed)
 trace "Connection upgrade succeeded"
@@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =

 trace "Switch stopped"

-proc start*(s: Switch) {.async, gcsafe, public.} =
+proc start*(s: Switch) {.async, public.} =
 ## Start listening on every transport

 if s.started:
@@ -174,7 +174,7 @@ method start*(

 trace "Listening on", address = ma

-method stop*(self: TcpTransport) {.async, gcsafe.} =
+method stop*(self: TcpTransport) {.async.} =
 ## stop the transport
 ##
 try:
@@ -210,7 +210,7 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
 except CatchableError as exc:
 trace "Error shutting down tcp transport", exc = exc.msg

-method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
+method accept*(self: TcpTransport): Future[Connection] {.async.} =
 ## accept a new TCP connection
 ##

@@ -260,7 +260,7 @@ method dial*(
 self: TcpTransport,
 hostname: string,
 address: MultiAddress,
-peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
+peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
 ## dial a peer
 ##

|
|||||||
return TcpOnion3.match(address)
|
return TcpOnion3.match(address)
|
||||||
|
|
||||||
proc connectToTorServer(
|
proc connectToTorServer(
|
||||||
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
|
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
|
||||||
let transp = await connect(transportAddress)
|
let transp = await connect(transportAddress)
|
||||||
try:
|
try:
|
||||||
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
|
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
|
||||||
@ -99,7 +99,7 @@ proc connectToTorServer(
|
|||||||
await transp.closeWait()
|
await transp.closeWait()
|
||||||
raise err
|
raise err
|
||||||
|
|
||||||
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
proc readServerReply(transp: StreamTransport) {.async.} =
|
||||||
## The specification for this code is defined on
|
## The specification for this code is defined on
|
||||||
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
|
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
|
||||||
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
|
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
|
||||||
@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
|||||||
let atyp = firstFourOctets[3]
|
let atyp = firstFourOctets[3]
|
||||||
case atyp:
|
case atyp:
|
||||||
of Socks5AddressType.IPv4.byte:
|
of Socks5AddressType.IPv4.byte:
|
||||||
discard await transp.read(ipV4NumOctets + portNumOctets)
|
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||||
of Socks5AddressType.FQDN.byte:
|
of Socks5AddressType.FQDN.byte:
|
||||||
let fqdnNumOctets = await transp.read(1)
|
let fqdnNumOctets = await transp.read(1)
|
||||||
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
|
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
|
||||||
@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
|
|||||||
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
||||||
|
|
||||||
proc dialPeer(
|
proc dialPeer(
|
||||||
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
|
transp: StreamTransport, address: MultiAddress) {.async.} =
|
||||||
let (atyp, dstAddr, dstPort) =
|
let (atyp, dstAddr, dstPort) =
|
||||||
if Onion3.match(address):
|
if Onion3.match(address):
|
||||||
parseOnion3(address)
|
parseOnion3(address)
|
||||||
@ -190,7 +190,7 @@ method dial*(
|
|||||||
self: TorTransport,
|
self: TorTransport,
|
||||||
hostname: string,
|
hostname: string,
|
||||||
address: MultiAddress,
|
address: MultiAddress,
|
||||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||||
## dial a peer
|
## dial a peer
|
||||||
##
|
##
|
||||||
if not handlesDial(address):
|
if not handlesDial(address):
|
||||||
@ -229,14 +229,14 @@ method start*(
|
|||||||
else:
|
else:
|
||||||
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
|
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
|
||||||
|
|
||||||
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
|
method accept*(self: TorTransport): Future[Connection] {.async.} =
|
||||||
## accept a new Tor connection
|
## accept a new Tor connection
|
||||||
##
|
##
|
||||||
let conn = await self.tcpTransport.accept()
|
let conn = await self.tcpTransport.accept()
|
||||||
conn.observedAddr = Opt.none(MultiAddress)
|
conn.observedAddr = Opt.none(MultiAddress)
|
||||||
return conn
|
return conn
|
||||||
|
|
||||||
method stop*(self: TorTransport) {.async, gcsafe.} =
|
method stop*(self: TorTransport) {.async.} =
|
||||||
## stop the transport
|
## stop the transport
|
||||||
##
|
##
|
||||||
await procCall Transport(self).stop() # call base
|
await procCall Transport(self).stop() # call base
|
||||||
|
@@ -83,13 +83,12 @@ proc dial*(
 method upgrade*(
 self: Transport,
 conn: Connection,
-direction: Direction,
 peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
 ## base upgrade method that the transport uses to perform
 ## transport specific upgrades
 ##

-self.upgrader.upgrade(conn, direction, peerId)
+self.upgrader.upgrade(conn, peerId)

 method handles*(
 self: Transport,
@@ -173,7 +173,7 @@ method start*(

 self.running = true

-method stop*(self: WsTransport) {.async, gcsafe.} =
+method stop*(self: WsTransport) {.async.} =
 ## stop the transport
 ##

@@ -237,7 +237,7 @@ proc connHandler(self: WsTransport,
 asyncSpawn onClose()
 return conn

-method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
+method accept*(self: WsTransport): Future[Connection] {.async.} =
 ## accept a new WS connection
 ##

@@ -295,7 +295,7 @@ method dial*(
 self: WsTransport,
 hostname: string,
 address: MultiAddress,
-peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
+peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
 ## dial a peer
 ##

@@ -32,8 +32,7 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =

 proc mux*(
 self: MuxedUpgrade,
-conn: Connection,
-direction: Direction): Future[Muxer] {.async, gcsafe.} =
+conn: Connection): Future[Muxer] {.async.} =
 ## mux connection

 trace "Muxing connection", conn
@@ -42,7 +41,7 @@ proc mux*(
 return

 let muxerName =
-if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
+if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
 else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))

 if muxerName.len == 0 or muxerName == "na":
@@ -62,16 +61,15 @@ proc mux*(
 method upgrade*(
 self: MuxedUpgrade,
 conn: Connection,
-direction: Direction,
 peerId: Opt[PeerId]): Future[Muxer] {.async.} =
-trace "Upgrading connection", conn, direction
+trace "Upgrading connection", conn, direction = conn.dir

-let sconn = await self.secure(conn, direction, peerId) # secure the connection
+let sconn = await self.secure(conn, peerId) # secure the connection
 if isNil(sconn):
 raise newException(UpgradeFailedError,
 "unable to secure connection, stopping upgrade")

-let muxer = await self.mux(sconn, direction) # mux it if possible
+let muxer = await self.mux(sconn) # mux it if possible
 if muxer == nil:
 raise newException(UpgradeFailedError,
 "a muxer is required for outgoing connections")
@@ -84,7 +82,7 @@ method upgrade*(
 raise newException(UpgradeFailedError,
 "Connection closed or missing peer info, stopping upgrade")

-trace "Upgraded connection", conn, sconn, direction
+trace "Upgraded connection", conn, sconn, direction = conn.dir
 return muxer

 proc new*(
@@ -98,8 +96,7 @@ proc new*(
 secureManagers: @secureManagers,
 ms: ms)

-upgrader.streamHandler = proc(conn: Connection)
-{.async, gcsafe, raises: [].} =
+upgrader.streamHandler = proc(conn: Connection) {.async.} =
 trace "Starting stream handler", conn
 try:
 await upgrader.ms.handle(conn) # handle incoming connection
@@ -40,20 +40,18 @@ type
 method upgrade*(
 self: Upgrade,
 conn: Connection,
-direction: Direction,
 peerId: Opt[PeerId]): Future[Muxer] {.base.} =
 doAssert(false, "Not implemented!")

 proc secure*(
 self: Upgrade,
 conn: Connection,
-direction: Direction,
-peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
+peerId: Opt[PeerId]): Future[Connection] {.async.} =
 if self.secureManagers.len <= 0:
 raise newException(UpgradeFailedError, "No secure managers registered!")

 let codec =
-if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
+if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
 else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
 if codec.len == 0:
 raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@@ -65,4 +63,4 @@ proc secure*(
 # let's avoid duplicating checks but detect if it fails to do it properly
 doAssert(secureProtocol.len > 0)

-return await secureProtocol[0].secure(conn, direction == Out, peerId)
+return await secureProtocol[0].secure(conn, peerId)
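Taken together, these upgrade hunks replace the threaded `direction` argument with `conn.dir` everywhere a negotiation role is chosen: the outbound side actively selects, the inbound side answers. A hedged restatement of that pattern (assuming `ms`, `conn`, and `codecs` are in scope):

```nim
# Hedged sketch of the role selection now keyed on conn.dir.
let chosen =
  if conn.dir == Direction.Out:
    await ms.select(conn, codecs)                 # dialer proposes
  else:
    await MultistreamSelect.handle(conn, codecs)  # listener answers
```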
@@ -89,8 +89,27 @@ template exceptionToAssert*(body: untyped): untyped =
 res

 template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
-if self.isSome:
-let value {.inject.} = self.get()
+## This template provides a convenient way to work with `Option` types in Nim.
+## It allows you to execute a block of code (`body`) only when the `Option` is not empty.
+##
+## `self` is the `Option` instance being checked.
+## `value` is the variable name to be used within the `body` for the unwrapped value.
+## `body` is a block of code that is executed only if `self` contains a value.
+##
+## The `value` within `body` is automatically unwrapped from the `Option`, making it
+## simpler to work with without needing explicit checks or unwrapping.
+##
+## Example:
+## ```nim
+## let myOpt = Opt.some(5)
+## myOpt.withValue(value):
+##   echo value # Will print 5
+## ```
+##
+## Note: This is a template, and it will be inlined at the call site, offering good performance.
+let temp = (self)
+if temp.isSome:
+let value {.inject.} = temp.get()
 body

 macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =
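The four-argument `withValue` overload below the template is not shown in this diff; assuming its second body is an else branch for the empty case (which its arity suggests), usage would look like this hedged sketch:

```nim
# Hedged sketch: assumed if/else shape of the macro overload.
let maybePort = Opt.none(int)
maybePort.withValue(port):
  echo "listening on port ", port
else:
  echo "no port configured"
```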
@@ -5,21 +5,21 @@ export unittest2, chronos
 template asyncTeardown*(body: untyped): untyped =
 teardown:
 waitFor((
-proc() {.async, gcsafe.} =
+proc() {.async.} =
 body
 )())

 template asyncSetup*(body: untyped): untyped =
 setup:
 waitFor((
-proc() {.async, gcsafe.} =
+proc() {.async.} =
 body
 )())

 template asyncTest*(name: string, body: untyped): untyped =
 test name:
 waitFor((
-proc() {.async, gcsafe.} =
+proc() {.async.} =
 body
 )())

@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
 inc attemptNumber
 try:
 waitFor((
-proc() {.async, gcsafe.} =
+proc() {.async.} =
 body
 )())
 except Exception as e:
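These templates wrap the test body in an async proc and drive it with `waitFor`, which is why the suites later in this diff can `await` directly inside `asyncTest`. A hedged usage sketch:

```nim
# Hedged sketch: the body may await because asyncTest wraps it in waitFor.
suite "example":
  asyncTest "sleep then check":
    await sleepAsync(10.milliseconds)
    check true
```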
@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
 buf.finish()
 result = s.write(buf.buffer)

-proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
+proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
 ## read length prefixed msg
 var
 size: uint
@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string

 let transport2 = transpProvider()

-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
 let conn = await transport1.accept()
 if conn.observedAddr.isSome():
 check transport1.handles(conn.observedAddr.get())
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
 let transport1 = transpProvider()
 await transport1.start(ma)

-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
 let conn = await transport1.accept()
 await conn.write("Hello!")
 await conn.close()
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
 let transport1 = transpProvider()
 await transport1.start(ma)

-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
 let conn = await transport1.accept()
 var msg = newSeq[byte](6)
 await conn.readExactly(addr msg[0], 6)
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
 let transport1 = transpProvider()
 await transport1.start(addrs)

-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
 while true:
 let conn = await transport1.accept()
 await conn.write(newSeq[byte](0))
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
 let transport1 = transpProvider()
 await transport1.start(ma)

-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
 let conn = await transport1.accept()
 await conn.close()

@@ -111,7 +111,7 @@ proc bridgedConnections*: (Connection, Connection) =
 return (connA, connB)


-proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
+proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async.} =
 let start = Moment.now()
 while true:
 if Moment.now() > (start + chronos.seconds(5)):
@@ -146,8 +146,8 @@ proc default*(T: typedesc[MockResolver]): T =
 resolver.ipResponses[("localhost", true)] = @["::1"]
 resolver

-proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
-proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+proc setDNSAddr*(switch: Switch) {.async.} =
+proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
 return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
 switch.peerInfo.addressMappers.add(addressMapper)
 await switch.peerInfo.update()
17
tests/hole-punching-interop/Dockerfile
Normal file
@@ -0,0 +1,17 @@
+# syntax=docker/dockerfile:1.5-labs
+FROM nimlang/nim:1.6.14 as builder
+
+WORKDIR /workspace
+
+COPY .pinned libp2p.nimble nim-libp2p/
+
+RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
+
+COPY . nim-libp2p/
+
+RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
+
+FROM --platform=linux/amd64 debian:bookworm-slim
+RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
+COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
+ENV RUST_BACKTRACE=1
114
tests/hole-punching-interop/hole_punching.nim
Normal file
@@ -0,0 +1,114 @@
+import std/[os, options, strformat]
+import redis
+import chronos, chronicles
+import ../../libp2p/[builders,
+                     switch,
+                     observedaddrmanager,
+                     services/hpservice,
+                     services/autorelayservice,
+                     protocols/connectivity/autonat/client as aclient,
+                     protocols/connectivity/relay/client as rclient,
+                     protocols/connectivity/relay/relay,
+                     protocols/connectivity/autonat/service,
+                     protocols/ping]
+import ../stubs/autonatclientstub
+
+proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
+  let rng = newRng()
+  var builder = SwitchBuilder.new()
+    .withRng(rng)
+    .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
+    .withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
+    .withTcpTransport({ServerFlags.TcpNoDelay})
+    .withYamux()
+    .withAutonat()
+    .withNoise()
+
+  if hpService != nil:
+    builder = builder.withServices(@[hpService])
+
+  if r != nil:
+    builder = builder.withCircuitRelay(r)
+
+  let s = builder.build()
+  s.mount(Ping.new(rng=rng))
+  return s
+
+proc main() {.async.} =
+  try:
+    let relayClient = RelayClient.new()
+    let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
+    let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
+    autonatClientStub.answer = NotReachable
+    let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
+    let hpservice = HPService.new(autonatService, autoRelayService)
+
+    let
+      isListener = getEnv("MODE") == "listen"
+      switch = createSwitch(relayClient, hpservice)
+      auxSwitch = createSwitch()
+      redisClient = open("redis", 6379.Port)
+
+    debug "Connected to redis"
+
+    await switch.start()
+    await auxSwitch.start()
+
+    let relayAddr =
+      try:
+        redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
+      except Exception as e:
+        raise newException(CatchableError, e.msg)
+
+    # This is necessary to make the autonat service work. It will ask this peer for our reachability which the autonat
+    # client stub will answer NotReachable.
+    await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
+
+    # Wait for autonat to be NotReachable
+    while autonatService.networkReachability != NetworkReachability.NotReachable:
+      await sleepAsync(100.milliseconds)
+
+    # This will trigger the autonat relay service to make a reservation.
+    let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
+    debug "Got relay address", relayMA
+    let relayId = await switch.connect(relayMA)
+    debug "Connected to relay", relayId
+
+    # Wait for our relay address to be published
+    while switch.peerInfo.addrs.len == 0:
+      await sleepAsync(100.milliseconds)
+
+    if isListener:
+      let listenerPeerId = switch.peerInfo.peerId
+      discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
+      debug "Pushed listener client peer id to redis", listenerPeerId
+
+      # Nothing to do anymore, wait to be killed
+      await sleepAsync(2.minutes)
+    else:
+      let listenerId =
+        try:
+          PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
+        except Exception as e:
+          raise newException(CatchableError, e.msg)
+
+      debug "Got listener peer id", listenerId
+      let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
+
+      debug "Dialing listener relay address", listenerRelayAddr
+      await switch.connect(listenerId, @[listenerRelayAddr])
+
+      # wait for hole-punching to complete in the background
+      await sleepAsync(5000.milliseconds)
+
+      let conn = switch.connManager.selectMuxer(listenerId).connection
+      let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
+      let delay = await Ping.new().ping(channel)
+      await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
+      echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
+      quit(0)
+  except CatchableError as e:
+    error "Unexpected error", msg = e.msg
+
+discard waitFor(main().withTimeout(4.minutes))
+quit(1)
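The client branch above measures hole-punch success indirectly: after the background punch, it dials the listener's circuit address with the ping codec and reports the round-trip time. Its core, restated as a hedged sketch (assuming `switch`, `listenerId`, and `listenerRelayAddr` as in the file):

```nim
# Hedged sketch of the RTT probe used by the test.
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
echo "rtt millis: ", delay.millis
```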
7
tests/hole-punching-interop/version.json
Normal file
@@ -0,0 +1,7 @@
+{
+  "id": "nim-libp2p-head",
+  "containerImageID": "nim-libp2p-head",
+  "transports": [
+    "tcp"
+  ]
+}
@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
|
|||||||
|
|
||||||
import ../helpers
|
import ../helpers
|
||||||
|
|
||||||
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
proc waitSub(sender, receiver: auto; key: string) {.async.} =
|
||||||
# turn things deterministic
|
# turn things deterministic
|
||||||
# this is for testing purposes only
|
# this is for testing purposes only
|
||||||
var ceil = 15
|
var ceil = 15
|
||||||
@ -43,7 +43,7 @@ suite "FloodSub":
   asyncTest "FloodSub basic publish/subscribe A -> B":
     var completionFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       completionFut.complete(true)

@ -81,7 +81,7 @@ suite "FloodSub":
   asyncTest "FloodSub basic publish/subscribe B -> A":
     var completionFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       completionFut.complete(true)

@ -113,7 +113,7 @@ suite "FloodSub":
   asyncTest "FloodSub validation should succeed":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete(true)

@ -151,7 +151,7 @@ suite "FloodSub":
     await allFuturesThrowing(nodesFut)

   asyncTest "FloodSub validation should fail":
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check false # if we get here, it should fail

     let

@ -186,7 +186,7 @@ suite "FloodSub":
   asyncTest "FloodSub validation one fails and one succeeds":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foo"
       handlerFut.complete(true)

@ -235,7 +235,7 @@ suite "FloodSub":
       counter = new int
       futs[i] = (
         fut,
-        (proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
+        (proc(topic: string, data: seq[byte]) {.async.} =
           check topic == "foobar"
           inc counter[]
           if counter[] == runs - 1:

@ -283,7 +283,7 @@ suite "FloodSub":
       counter = new int
       futs[i] = (
         fut,
-        (proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
+        (proc(topic: string, data: seq[byte]) {.async.} =
           check topic == "foobar"
           inc counter[]
           if counter[] == runs - 1:

@ -333,7 +333,7 @@ suite "FloodSub":
   asyncTest "FloodSub message size validation":
     var messageReceived = 0
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check data.len < 50
       inc(messageReceived)

@ -375,7 +375,7 @@ suite "FloodSub":
   asyncTest "FloodSub message size validation 2":
     var messageReceived = 0
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       inc(messageReceived)

     let
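These FloodSub tests all follow the same shape: a topic handler completes a future, and the test awaits that future with a timeout. A reduced, transport-free sketch of the pattern, assuming chronos (`handler` is called directly here instead of through pubsub):

import chronos

var completionFut = newFuture[bool]()

proc handler(topic: string) {.async.} =
  if topic == "foobar":
    completionFut.complete(true)

waitFor handler("foobar")
assert waitFor(completionFut.wait(1.seconds)) == true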
@ -24,7 +24,7 @@ import utils
 import ../helpers

-proc noop(data: seq[byte]) {.async, gcsafe.} = discard
+proc noop(data: seq[byte]) {.async.} = discard

 const MsgIdSuccess = "msg id gen success"

@ -730,10 +730,10 @@ suite "GossipSub internal":
     var receivedMessages = new(HashSet[seq[byte]])

-    proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handlerA(topic: string, data: seq[byte]) {.async.} =
       receivedMessages[].incl(data)

-    proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handlerB(topic: string, data: seq[byte]) {.async.} =
       discard

     nodes[0].subscribe("foobar", handlerA)
@ -47,7 +47,7 @@ suite "GossipSub":
   asyncTest "GossipSub validation should succeed":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete(true)

@ -92,7 +92,7 @@ suite "GossipSub":
     await allFuturesThrowing(nodesFut.concat())

   asyncTest "GossipSub validation should fail (reject)":
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check false # if we get here, it should fail

     let

@ -138,7 +138,7 @@ suite "GossipSub":
     await allFuturesThrowing(nodesFut.concat())

   asyncTest "GossipSub validation should fail (ignore)":
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check false # if we get here, it should fail

     let

@ -185,7 +185,7 @@ suite "GossipSub":
   asyncTest "GossipSub validation one fails and one succeeds":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foo"
       handlerFut.complete(true)

@ -238,7 +238,7 @@ suite "GossipSub":
   asyncTest "GossipSub unsub - resub faster than backoff":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete(true)

@ -289,7 +289,7 @@ suite "GossipSub":
     await allFuturesThrowing(nodesFut.concat())

   asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       discard

     let

@ -323,7 +323,7 @@ suite "GossipSub":
     await allFuturesThrowing(nodesFut.concat())

   asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       discard

     let

@ -374,7 +374,7 @@ suite "GossipSub":
   asyncTest "e2e - GossipSub send over fanout A -> B":
     var passed = newFuture[void]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       passed.complete()

@ -428,7 +428,7 @@ suite "GossipSub":
   asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
     var passed = newFuture[void]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       passed.complete()

@ -481,7 +481,7 @@ suite "GossipSub":
   asyncTest "e2e - GossipSub send over mesh A -> B":
     var passed: Future[bool] = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       passed.complete(true)

@ -548,11 +548,11 @@ suite "GossipSub":
     var
       aReceived = 0
       cReceived = 0
-    proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handlerA(topic: string, data: seq[byte]) {.async.} =
       inc aReceived
       check aReceived < 2
-    proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+    proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
-    proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handlerC(topic: string, data: seq[byte]) {.async.} =
       inc cReceived
       check cReceived < 2
       cRelayed.complete()

@ -596,7 +596,7 @@ suite "GossipSub":
   asyncTest "e2e - GossipSub send over floodPublish A -> B":
     var passed: Future[bool] = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       passed.complete(true)

@ -653,7 +653,7 @@ suite "GossipSub":
     )

 proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
-  proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+  proc handler(topic: string, data: seq[byte]) {.async.} =
     check topic == "foobar"

   for node in nodes:

@ -661,7 +661,7 @@ suite "GossipSub":
     await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)

 proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
-  proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+  proc handler(topic: string, data: seq[byte]) {.async.} =
     check topic == "foobar"

   block setup:

@ -727,7 +727,7 @@ suite "GossipSub":
       var handler: TopicHandler
       closureScope:
         var peerName = $dialer.peerInfo.peerId
-        handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
+        handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
           if peerName notin seen:
             seen[peerName] = 0
           seen[peerName].inc

@ -778,7 +778,7 @@ suite "GossipSub":
       var handler: TopicHandler
       capture dialer, i:
         var peerName = $dialer.peerInfo.peerId
-        handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
+        handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
           if peerName notin seen:
             seen[peerName] = 0
           seen[peerName].inc

@ -819,7 +819,7 @@ suite "GossipSub":
     # PX to A & C
     #
     # C sent his SPR, not A
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       discard # not used in this test

     let

@ -895,9 +895,9 @@ suite "GossipSub":
     await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)

     let bFinished = newFuture[void]()
-    proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+    proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
-    proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
+    proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
-    proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
+    proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false

     nodes[0].subscribe("foobar", handlerA)
     nodes[1].subscribe("foobar", handlerB)

@ -943,7 +943,7 @@ suite "GossipSub":
     await subscribeNodes(nodes)

-    proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+    proc handle(topic: string, data: seq[byte]) {.async.} = discard

     let gossip0 = GossipSub(nodes[0])
     let gossip1 = GossipSub(nodes[1])
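Handlers assigned to a `TopicHandler` variable keep the explicit `closure` calling convention while likewise dropping `gcsafe`. A small, dependency-light sketch of why `closure` is still spelled out (the `Handler` type and counter are illustrative, not the library's):

import chronos

type Handler = proc (topic: string): Future[void] {.closure.}

var hits = 0
let handler: Handler = proc (topic: string) {.async, closure.} =
  hits.inc

waitFor handler("foobar")
assert hits == 1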
@ -59,7 +59,7 @@ suite "GossipSub":
       var handler: TopicHandler
       closureScope:
         var peerName = $dialer.peerInfo.peerId
-        handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
+        handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
           if peerName notin seen:
             seen[peerName] = 0
           seen[peerName].inc

@ -93,7 +93,7 @@ suite "GossipSub":
   asyncTest "GossipSub invalid topic subscription":
     var handlerFut = newFuture[bool]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete(true)

@ -155,7 +155,7 @@ suite "GossipSub":
     # DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
     ### await subscribeNodes(nodes)

-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
+    proc handler(topic: string, data: seq[byte]) {.async.} = discard
     nodes[1].subscribe("foobar", handler)

     await invalidDetected.wait(10.seconds)

@ -182,10 +182,10 @@ suite "GossipSub":
     await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)

     var handlerFut = newFuture[void]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete()
-    proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc noop(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"

     nodes[0].subscribe("foobar", noop)

@ -226,7 +226,7 @@ suite "GossipSub":
     GossipSub(nodes[1]).parameters.graylistThreshold = 100000

     var handlerFut = newFuture[void]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"
       handlerFut.complete()

@ -272,7 +272,7 @@ suite "GossipSub":
       var handler: TopicHandler
       closureScope:
         var peerName = $dialer.peerInfo.peerId
-        handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
+        handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
           if peerName notin seen:
             seen[peerName] = 0
           seen[peerName].inc

@ -324,7 +324,7 @@ suite "GossipSub":
     # Adding again subscriptions

-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       check topic == "foobar"

     for i in 0..<runs:

@ -368,7 +368,7 @@ suite "GossipSub":
     )

     var handlerFut = newFuture[void]()
-    proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
+    proc handler(topic: string, data: seq[byte]) {.async.} =
       handlerFut.complete()

     await subscribeNodes(nodes)
@ -128,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
       await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
       dialed.add(node.peerInfo.peerId)

-proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
+proc waitSub*(sender, receiver: auto; key: string) {.async.} =
   if sender == receiver:
     return
   let timeout = Moment.now() + 5.seconds

@ -148,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
     await sleepAsync(5.milliseconds)
   doAssert Moment.now() < timeout, "waitSub timeout!"

-proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
+proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
   let timeout = Moment.now() + 5.seconds
   while true:
     var
@ -24,7 +24,7 @@ type
       addrs: seq[MultiAddress],
       forceDial = false,
       reuseConnection = true,
-      upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
+      dir = Direction.Out): Future[void] {.async.}

 method connect*(
   self: SwitchStub,

@ -32,11 +32,11 @@
   addrs: seq[MultiAddress],
   forceDial = false,
   reuseConnection = true,
-  upgradeDir = Direction.Out) {.async.} =
+  dir = Direction.Out) {.async.} =
   if (self.connectStub != nil):
-    await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
+    await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
   else:
-    await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
+    await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)

proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
  return SwitchStub(
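The stub above forwards to the real switch unless a test installs `connectStub`; the `upgradeDir` parameter is renamed to `dir`, matching the connect signature used elsewhere in this commit. A dependency-free sketch of the same nil-checked stub pattern (all names here are hypothetical stand-ins):

type
  ConnectProc = proc (addrs: string): string
  StubbedDialer = object
    connectStub: ConnectProc

proc realConnect(addrs: string): string =
  "connected to " & addrs

proc connect(d: StubbedDialer, addrs: string): string =
  if d.connectStub != nil:
    d.connectStub(addrs)   # test-provided behaviour
  else:
    realConnect(addrs)     # default: forward to the real dial path

var stubbed = StubbedDialer(connectStub: proc (addrs: string): string =
  "stubbed " & addrs)
assert stubbed.connect("/ip4/127.0.0.1/tcp/1") == "stubbed /ip4/127.0.0.1/tcp/1"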
@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =

 proc makeAutonatServicePrivate(): Switch =
   var autonatProtocol = new LPProtocol
-  autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
+  autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
     discard await conn.readLp(1024)
     await conn.writeLp(AutonatDialResponse(
       status: DialError,
@ -87,7 +87,7 @@ suite "Autonat Service":

     let awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
         if not awaiter.finished:
           awaiter.complete()
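The status handler completes an `awaiter` future at most once, when reachability and confidence cross the expected threshold. A reduced sketch of this complete-once gate, assuming chronos:

import chronos

let awaiter = newFuture[void]()

proc onStatus(reachable: bool, confidence: float) =
  if reachable and confidence >= 0.3:
    if not awaiter.finished:
      awaiter.complete()

onStatus(true, 0.5)   # completes the future
onStatus(true, 0.9)   # no-op: already finished
waitFor awaiter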
@ -131,7 +131,7 @@ suite "Autonat Service":

     let awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
         if not awaiter.finished:
           autonatClientStub.answer = Reachable

@ -173,7 +173,7 @@ suite "Autonat Service":

     let awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter.finished:
           awaiter.complete()

@ -213,7 +213,7 @@ suite "Autonat Service":

     let awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
         if not awaiter.finished:
           autonatClientStub.answer = Unknown

@ -267,7 +267,7 @@ suite "Autonat Service":

     let awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter.finished:
           awaiter.complete()

@ -302,12 +302,12 @@ suite "Autonat Service":
     let awaiter2 = newFuture[void]()
     let awaiter3 = newFuture[void]()

-    proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter1.finished:
           awaiter1.complete()

-    proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter2.finished:
           awaiter2.complete()

@ -345,7 +345,7 @@ suite "Autonat Service":

     let awaiter1 = newFuture[void]()

-    proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter1.finished:
           awaiter1.complete()

@ -388,7 +388,7 @@ suite "Autonat Service":

     var awaiter = newFuture[void]()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
         if not awaiter.finished:
           awaiter.complete()

@ -428,7 +428,7 @@ suite "Autonat Service":
     let switch1 = createSwitch(autonatService)
     let switch2 = createSwitch()

-    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
+    proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
       fail()

     check autonatService.networkReachability == NetworkReachability.Unknown
@ -32,7 +32,7 @@ method newStream*(
   m: TestMuxer,
   name: string = "",
   lazy: bool = false):
-  Future[Connection] {.async, gcsafe.} =
+  Future[Connection] {.async.} =
   result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))

 suite "Connection Manager":
@ -57,14 +57,15 @@ suite "Dcutr":
     for t in behindNATSwitch.transports:
       t.networkReachability = NetworkReachability.NotReachable

-    await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
-      .wait(300.millis)
+    expect CatchableError:
+      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
+      # in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
+      # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
+      await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
+        .wait(300.millis)

     checkExpiring:
-      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
-      # in two connections attemps, instead of one. The server dial is going to fail because it is acting as the
-      # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
-      # dial will succeed.
+      # we still expect a new connection to be open by the receiver peer acting as the dcutr server
       behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2

     await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
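The test now wraps the failing dial in `expect`, which passes only when the block raises the named exception type. The same construct from the stdlib unittest module, in isolation (the repo's own asyncTest templates build on the same idea):

import unittest

test "expect passes when the exception is raised":
  expect CatchableError:
    raise newException(CatchableError, "simulated dial failure")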
@ -83,8 +84,8 @@ suite "Dcutr":
     body

     checkExpiring:
-      # no connection will be open by the receiver peer acting as the dcutr server
-      behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
+      # we still expect a new connection to be open by the receiver peer acting as the dcutr server
+      behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2

     await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@ -95,7 +96,7 @@ suite "Dcutr":
       addrs: seq[MultiAddress],
       forceDial = false,
       reuseConnection = true,
-      upgradeDir = Direction.Out): Future[void] {.async.} =
+      dir = Direction.Out): Future[void] {.async.} =
       await sleepAsync(100.millis)

     let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)

@ -114,7 +115,7 @@ suite "Dcutr":
       addrs: seq[MultiAddress],
       forceDial = false,
       reuseConnection = true,
-      upgradeDir = Direction.Out): Future[void] {.async.} =
+      dir = Direction.Out): Future[void] {.async.} =
       raise newException(CatchableError, "error")

     let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@ -142,13 +143,16 @@ suite "Dcutr":
     for t in behindNATSwitch.transports:
       t.networkReachability = NetworkReachability.NotReachable

-    await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
-      .wait(300.millis)
+    expect CatchableError:
+      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
+      # in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
+      # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
+      await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
+        .wait(300.millis)

     checkExpiring:
-      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
-      # in two connections attemps, instead of one. The server dial is going to fail, but the client dial will succeed.
-      behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
+      # we still expect a new connection to be open by the receiver peer acting as the dcutr server
+      behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1

     await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@ -159,7 +163,7 @@ suite "Dcutr":
       addrs: seq[MultiAddress],
       forceDial = false,
       reuseConnection = true,
-      upgradeDir = Direction.Out): Future[void] {.async.} =
+      dir = Direction.Out): Future[void] {.async.} =
       await sleepAsync(100.millis)

     await ductrServerTest(connectProc)
@ -171,7 +175,23 @@ suite "Dcutr":
       addrs: seq[MultiAddress],
       forceDial = false,
       reuseConnection = true,
-      upgradeDir = Direction.Out): Future[void] {.async.} =
+      dir = Direction.Out): Future[void] {.async.} =
       raise newException(CatchableError, "error")

     await ductrServerTest(connectProc)
+
+  test "should return valid TCP/IP and TCP/DNS addresses only":
+    let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
+                      MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
+                      MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
+                      MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
+                      MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
+
+    let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
+                     MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
+                     MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
+                     MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
+
+    let result = getHolePunchableAddrs(testAddrs)
+
+    check result == expected
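The new test pins down `getHolePunchableAddrs`: TCP addresses over ip4/ip6/dns4 are kept with any trailing /p2p/<peer-id> suffix stripped, while UDP addresses are dropped. A simplified string-based sketch of that filtering (the real code operates on `MultiAddress` values, not strings):

import std/strutils

proc holePunchable(addrs: seq[string]): seq[string] =
  for a in addrs:
    if "/tcp/" notin a:
      continue                      # e.g. UDP addresses are not usable here
    let p2pIdx = a.find("/p2p/")
    result.add(if p2pIdx >= 0: a[0 ..< p2pIdx] else: a)

assert holePunchable(@["/ip4/192.0.2.1/tcp/1234",
                       "/ip4/198.51.100.42/udp/7890"]) ==
  @["/ip4/192.0.2.1/tcp/1234"]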
@ -65,7 +65,7 @@ suite "Hole Punching":

     let publicPeerSwitch = createSwitch(RelayClient.new())

-    proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
+    proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
       return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
     publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
     await publicPeerSwitch.peerInfo.update()
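`addressMapper` rewrites the switch's advertised addresses; here the test swaps the IP component for a DNS name so the advertised address carries /dns4. A string-level sketch of that mapping (illustrative only; the real mapper concatenates `MultiAddress` parts as shown above):

import std/strutils

proc mapToDns(listenAddrs: seq[string]): seq[string] =
  for a in listenAddrs:
    # "/ip4/127.0.0.1/tcp/4040" -> ["", "ip4", "127.0.0.1", "tcp", "4040"]
    let parts = a.split('/')
    result.add("/dns4/localhost/" & parts[3] & "/" & parts[4])

assert mapToDns(@["/ip4/127.0.0.1/tcp/4040"]) == @["/dns4/localhost/tcp/4040"]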
@ -193,38 +193,24 @@ suite "Hole Punching":
     await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
     privatePeerSwitch2.connectStub = rcvConnectStub

-    checkExpiring:
-      # we can't hole punch when both peers are in the same machine. This means that the simultaneous dialings will result
-      # in two connections attemps, instead of one. The server dial is going to fail because it is acting as the
-      # tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
-      # dial will succeed.
-      privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
-        not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
+    # wait for hole punching to finish in the background
+    await sleepAsync(600.millis)

     await allFuturesThrowing(
       privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
       switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())

   asyncTest "Hole punching when peers addresses are private":
-    proc connectStub(self: SwitchStub,
-                     peerId: PeerId,
-                     addrs: seq[MultiAddress],
-                     forceDial = false,
-                     reuseConnection = true,
-                     upgradeDir = Direction.Out): Future[void] {.async.} =
-      self.connectStub = nil # this stub should be called only once
-      await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
-      await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
-    await holePunchingTest(nil, connectStub, NotReachable)
+    await holePunchingTest(nil, nil, NotReachable)

-  asyncTest "Hole punching when there is an error during unilateral direct connection":
+  asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":

     proc connectStub(self: SwitchStub,
                      peerId: PeerId,
                      addrs: seq[MultiAddress],
                      forceDial = false,
                      reuseConnection = true,
-                     upgradeDir = Direction.Out): Future[void] {.async.} =
+                     dir = Direction.Out): Future[void] {.async.} =
       self.connectStub = nil # this stub should be called only once
       raise newException(CatchableError, "error")
@ -73,7 +73,7 @@ suite "Identify":

   asyncTest "default agent version":
     msListen.addHandler(IdentifyCodec, identifyProto1)
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       await msListen.handle(c)

@ -95,7 +95,7 @@ suite "Identify":
     remotePeerInfo.agentVersion = customAgentVersion
     msListen.addHandler(IdentifyCodec, identifyProto1)

-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       await msListen.handle(c)

@ -136,7 +136,7 @@ suite "Identify":
   asyncTest "can send signed peer record":
     msListen.addHandler(IdentifyCodec, identifyProto1)
     identifyProto1.sendSignedPeerRecord = true
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       await msListen.handle(c)
@ -97,7 +97,7 @@ suite "Mplex":

 suite "channel half-closed":
   asyncTest "(local close) - should close for write":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -112,7 +112,7 @@ suite "Mplex":
   asyncTest "(local close) - should allow reads until remote closes":
     let
       conn = TestBufferStream.new(
-        proc (data: seq[byte]) {.gcsafe, async.} =
+        proc (data: seq[byte]) {.async.} =
           discard,
       )
       chann = LPChannel.init(1, conn, true)

@ -139,7 +139,7 @@ suite "Mplex":
   asyncTest "(remote close) - channel should close for reading by remote":
     let
       conn = TestBufferStream.new(
-        proc (data: seq[byte]) {.gcsafe, async.} =
+        proc (data: seq[byte]) {.async.} =
           discard,
       )
       chann = LPChannel.init(1, conn, true)

@ -162,7 +162,7 @@ suite "Mplex":
     let
       testData = "Hello!".toBytes
       conn = TestBufferStream.new(
-        proc (data: seq[byte]) {.gcsafe, async.} =
+        proc (data: seq[byte]) {.async.} =
          discard
       )
       chann = LPChannel.init(1, conn, true)

@ -175,7 +175,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "should not allow pushing data to channel when remote end closed":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -192,7 +192,7 @@ suite "Mplex":
 suite "channel reset":

   asyncTest "channel should fail reading":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -205,7 +205,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete read":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -220,7 +220,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete pushData":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -239,7 +239,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete both read and push":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -254,7 +254,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete both read and pushes":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -279,7 +279,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete both read and push with cancel":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -293,7 +293,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "should complete both read and push after reset":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -311,7 +311,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete ongoing push without reader":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -323,7 +323,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should complete ongoing read without a push":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -335,7 +335,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "reset should allow all reads and pushes to complete":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -364,7 +364,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "channel should fail writing":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(1, conn, true)

@ -376,7 +376,7 @@ suite "Mplex":
     await conn.close()

   asyncTest "channel should reset on timeout":
-    proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
+    proc writeHandler(data: seq[byte]) {.async.} = discard
     let
       conn = TestBufferStream.new(writeHandler)
       chann = LPChannel.init(
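From here on, the Mplex tests wire a `streamHandler` into a listener and drive it from an accept loop. An IO-free sketch of that handler-wiring shape (all names are illustrative stand-ins, not libp2p APIs):

type
  StreamHandler = proc (msg: string)
  Listener = object
    streamHandler: StreamHandler

proc accept(l: Listener, msg: string) =
  # in the real tests this is an async accept loop over a TcpTransport
  l.streamHandler(msg)

var received: seq[string]
let listener = Listener(streamHandler: proc (msg: string) =
  received.add(msg))
listener.accept("HELLO")
assert received == @["HELLO"]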
@ -392,11 +392,11 @@ suite "Mplex":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let listenFut = transport1.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         let msg = await stream.readLp(1024)
         check string.fromBytes(msg) == "HELLO"
         await stream.close()

@ -429,11 +429,11 @@ suite "Mplex":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let listenFut = transport1.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         let msg = await stream.readLp(1024)
         check string.fromBytes(msg) == "HELLO"
         await stream.close()

@ -473,12 +473,12 @@ suite "Mplex":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let listenFut = transport1.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       try:
         let conn = await transport1.accept()
         let mplexListen = Mplex.new(conn)
         mplexListen.streamHandler = proc(stream: Connection)
-          {.async, gcsafe.} =
+          {.async.} =
           let msg = await stream.readLp(MaxMsgSize)
           check msg == bigseq
           trace "Bigseq check passed!"

@ -520,11 +520,11 @@ suite "Mplex":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let listenFut = transport1.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         await stream.writeLp("Hello from stream!")
         await stream.close()

@ -557,12 +557,12 @@ suite "Mplex":
     let listenFut = transport1.start(ma)

     let done = newFuture[void]()
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       var count = 1
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         let msg = await stream.readLp(1024)
         check string.fromBytes(msg) == &"stream {count}!"
         count.inc

@ -601,12 +601,12 @@ suite "Mplex":
     let listenFut = transport1.start(ma)

     let done = newFuture[void]()
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       var count = 1
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         let msg = await stream.readLp(1024)
         check string.fromBytes(msg) == &"stream {count} from dialer!"
         await stream.writeLp(&"stream {count} from listener!")

@ -646,12 +646,12 @@ suite "Mplex":

     let transport1 = TcpTransport.new(upgrade = Upgrade())
     var listenStreams: seq[Connection]
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)

       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         listenStreams.add(stream)
         try:
           discard await stream.readLp(1024)

@ -697,11 +697,11 @@ suite "Mplex":
     var count = 0
     var done = newFuture[void]()
     var listenStreams: seq[Connection]
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         listenStreams.add(stream)
         count.inc()
         if count == 10:

@ -761,11 +761,11 @@ suite "Mplex":
     let transport1 = TcpTransport.new(upgrade = Upgrade())

     var listenStreams: seq[Connection]
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
       let mplexListen = Mplex.new(conn)
       mplexListen.streamHandler = proc(stream: Connection)
-        {.async, gcsafe.} =
+        {.async.} =
         listenStreams.add(stream)
         await stream.join()

@ -805,11 +805,11 @@ suite "Mplex":

     var mplexListen: Mplex
|
||||||
var listenStreams: seq[Connection]
|
var listenStreams: seq[Connection]
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
mplexListen = Mplex.new(conn)
|
mplexListen = Mplex.new(conn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
listenStreams.add(stream)
|
listenStreams.add(stream)
|
||||||
await stream.join()
|
await stream.join()
|
||||||
|
|
||||||
@ -851,11 +851,11 @@ suite "Mplex":
|
|||||||
|
|
||||||
var mplexHandle: Future[void]
|
var mplexHandle: Future[void]
|
||||||
var listenStreams: seq[Connection]
|
var listenStreams: seq[Connection]
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
let mplexListen = Mplex.new(conn)
|
let mplexListen = Mplex.new(conn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
listenStreams.add(stream)
|
listenStreams.add(stream)
|
||||||
await stream.join()
|
await stream.join()
|
||||||
|
|
||||||
@ -896,11 +896,11 @@ suite "Mplex":
|
|||||||
let transport1 = TcpTransport.new(upgrade = Upgrade())
|
let transport1 = TcpTransport.new(upgrade = Upgrade())
|
||||||
|
|
||||||
var listenStreams: seq[Connection]
|
var listenStreams: seq[Connection]
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
let mplexListen = Mplex.new(conn)
|
let mplexListen = Mplex.new(conn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
listenStreams.add(stream)
|
listenStreams.add(stream)
|
||||||
await stream.join()
|
await stream.join()
|
||||||
|
|
||||||
@ -943,11 +943,11 @@ suite "Mplex":
|
|||||||
|
|
||||||
var listenConn: Connection
|
var listenConn: Connection
|
||||||
var listenStreams: seq[Connection]
|
var listenStreams: seq[Connection]
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
listenConn = await transport1.accept()
|
listenConn = await transport1.accept()
|
||||||
let mplexListen = Mplex.new(listenConn)
|
let mplexListen = Mplex.new(listenConn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
listenStreams.add(stream)
|
listenStreams.add(stream)
|
||||||
await stream.join()
|
await stream.join()
|
||||||
|
|
||||||
@ -992,11 +992,11 @@ suite "Mplex":
|
|||||||
|
|
||||||
var complete = newFuture[void]()
|
var complete = newFuture[void]()
|
||||||
const MsgSize = 1024
|
const MsgSize = 1024
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
let mplexListen = Mplex.new(conn)
|
let mplexListen = Mplex.new(conn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
try:
|
try:
|
||||||
let msg = await stream.readLp(MsgSize)
|
let msg = await stream.readLp(MsgSize)
|
||||||
check msg.len == MsgSize
|
check msg.len == MsgSize
|
||||||
@ -1064,11 +1064,11 @@ suite "Mplex":
|
|||||||
|
|
||||||
var complete = newFuture[void]()
|
var complete = newFuture[void]()
|
||||||
const MsgSize = 512
|
const MsgSize = 512
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
let mplexListen = Mplex.new(conn)
|
let mplexListen = Mplex.new(conn)
|
||||||
mplexListen.streamHandler = proc(stream: Connection)
|
mplexListen.streamHandler = proc(stream: Connection)
|
||||||
{.async, gcsafe.} =
|
{.async.} =
|
||||||
let msg = await stream.readLp(MsgSize)
|
let msg = await stream.readLp(MsgSize)
|
||||||
check msg.len == MsgSize
|
check msg.len == MsgSize
|
||||||
await stream.close()
|
await stream.close()
|
||||||
|
@@ -60,6 +60,7 @@ const
     "/ip4/127.0.0.1/tcp/1234",
     "/ip4/127.0.0.1/tcp/1234/",
     "/ip4/127.0.0.1/udp/1234/quic",
+    "/ip4/192.168.80.3/udp/33422/quic-v1",
     "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
     "/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
     "/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",

@@ -34,7 +34,7 @@ type
 
 method readOnce*(s: TestSelectStream,
                  pbytes: pointer,
-                 nbytes: int): Future[int] {.async, gcsafe.} =
+                 nbytes: int): Future[int] {.async.} =
   case s.step:
     of 1:
       var buf = newSeq[byte](1)
@@ -64,9 +64,9 @@ method readOnce*(s: TestSelectStream,
 
       return "\0x3na\n".len()
 
-method write*(s: TestSelectStream, msg: seq[byte]) {.async, gcsafe.} = discard
+method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
 
-method close(s: TestSelectStream) {.async, gcsafe.} =
+method close(s: TestSelectStream) {.async.} =
   s.isClosed = true
   s.isEof = true
 
@@ -113,11 +113,11 @@ method readOnce*(s: TestLsStream,
       copyMem(pbytes, addr buf[0], buf.len())
       return buf.len()
 
-method write*(s: TestLsStream, msg: seq[byte]) {.async, gcsafe.} =
+method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
   if s.step == 4:
     await s.ls(msg)
 
-method close(s: TestLsStream) {.async, gcsafe.} =
+method close(s: TestLsStream) {.async.} =
   s.isClosed = true
   s.isEof = true
 
@@ -137,7 +137,7 @@ type
 method readOnce*(s: TestNaStream,
                  pbytes: pointer,
                  nbytes: int):
-                 Future[int] {.async, gcsafe.} =
+                 Future[int] {.async.} =
   case s.step:
     of 1:
       var buf = newSeq[byte](1)
@@ -167,11 +167,11 @@ method readOnce*(s: TestNaStream,
 
       return "\0x3na\n".len()
 
-method write*(s: TestNaStream, msg: seq[byte]) {.async, gcsafe.} =
+method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
   if s.step == 4:
     await s.na(string.fromBytes(msg))
 
-method close(s: TestNaStream) {.async, gcsafe.} =
+method close(s: TestNaStream) {.async.} =
   s.isClosed = true
   s.isEof = true
 
@@ -197,7 +197,7 @@ suite "Multistream select":
     var protocol: LPProtocol = new LPProtocol
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} =
+                     Future[void] {.async.} =
       check proto == "/test/proto/1.0.0"
       await conn.close()
 
@@ -210,7 +210,7 @@ suite "Multistream select":
 
     var conn: Connection = nil
     let done = newFuture[void]()
-    proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} =
+    proc testLsHandler(proto: seq[byte]) {.async.} =
       var strProto: string = string.fromBytes(proto)
      check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
      await conn.close()
@@ -218,7 +218,7 @@ suite "Multistream select":
     conn = Connection(newTestLsStream(testLsHandler))
 
     proc testHandler(conn: Connection, proto: string): Future[void]
-      {.async, gcsafe.} = discard
+      {.async.} = discard
     var protocol: LPProtocol = new LPProtocol
     protocol.handler = testHandler
     ms.addHandler("/test/proto1/1.0.0", protocol)
@@ -230,7 +230,7 @@ suite "Multistream select":
     let ms = MultistreamSelect.new()
 
     var conn: Connection = nil
-    proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
+    proc testNaHandler(msg: string): Future[void] {.async.} =
       check msg == "\x03na\n"
       await conn.close()
     conn = newTestNaStream(testNaHandler)
@@ -238,7 +238,7 @@ suite "Multistream select":
     var protocol: LPProtocol = new LPProtocol
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} = discard
+                     Future[void] {.async.} = discard
     protocol.handler = testHandler
     ms.addHandler("/unabvailable/proto/1.0.0", protocol)
 
@@ -250,7 +250,7 @@ suite "Multistream select":
     var protocol: LPProtocol = new LPProtocol
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} =
+                     Future[void] {.async.} =
       check proto == "/test/proto/1.0.0"
       await conn.writeLp("Hello!")
       await conn.close()
@@ -262,7 +262,7 @@ suite "Multistream select":
     let transport1 = TcpTransport.new(upgrade = Upgrade())
     asyncSpawn transport1.start(ma)
 
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let conn = await transport1.accept()
       await msListen.handle(conn)
       await conn.close()
@@ -293,7 +293,7 @@ suite "Multistream select":
     # Unblock the 5 streams, check that we can open a new one
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} =
+                     Future[void] {.async.} =
       await blocker
       await conn.writeLp("Hello!")
       await conn.close()
@@ -315,7 +315,7 @@ suite "Multistream select":
       await msListen.handle(c)
       await c.close()
 
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       while true:
         let conn = await transport1.accept()
         asyncSpawn acceptedOne(conn)
@@ -362,7 +362,7 @@ suite "Multistream select":
 
     let msListen = MultistreamSelect.new()
     var protocol: LPProtocol = new LPProtocol
-    protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
+    protocol.handler = proc(conn: Connection, proto: string) {.async.} =
       # never reached
       discard
 
@@ -379,7 +379,7 @@ suite "Multistream select":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     let listenFut = transport1.start(ma)
 
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let conn = await transport1.accept()
       try:
         await msListen.handle(conn)
@@ -412,7 +412,7 @@ suite "Multistream select":
     var protocol: LPProtocol = new LPProtocol
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} =
+                     Future[void] {.async.} =
       check proto == "/test/proto/1.0.0"
       await conn.writeLp("Hello!")
       await conn.close()
@@ -424,7 +424,7 @@ suite "Multistream select":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     asyncSpawn transport1.start(ma)
 
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let conn = await transport1.accept()
       await msListen.handle(conn)
 
@@ -450,7 +450,7 @@ suite "Multistream select":
     var protocol: LPProtocol = new LPProtocol
     proc testHandler(conn: Connection,
                      proto: string):
-                     Future[void] {.async, gcsafe.} =
+                     Future[void] {.async.} =
       await conn.writeLp(&"Hello from {proto}!")
       await conn.close()
 
@@ -462,7 +462,7 @@ suite "Multistream select":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     asyncSpawn transport1.start(ma)
 
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let conn = await transport1.accept()
       await msListen.handle(conn)
 

@@ -41,7 +41,7 @@ type
 {.push raises: [].}
 
 method init(p: TestProto) {.gcsafe.} =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     let msg = string.fromBytes(await conn.readLp(1024))
     check "Hello!" == msg
     await conn.writeLp("Hello!")
@@ -100,7 +100,7 @@ suite "Noise":
 
     proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
-      let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
+      let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
       try:
         await sconn.write("Hello!")
       finally:
@@ -115,7 +115,7 @@ suite "Noise":
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.addrs[0])
 
-    let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
+    let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
 
     var msg = newSeq[byte](6)
     await sconn.readExactly(addr msg[0], 6)
@@ -140,11 +140,11 @@ suite "Noise":
 
     asyncSpawn transport1.start(server)
 
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       var conn: Connection
       try:
         conn = await transport1.accept()
-        discard await serverNoise.secure(conn, false, Opt.none(PeerId))
+        discard await serverNoise.secure(conn, Opt.none(PeerId))
       except CatchableError:
         discard
       finally:
@@ -160,7 +160,7 @@ suite "Noise":
 
     var sconn: Connection = nil
     expect(NoiseDecryptTagError):
-      sconn = await clientNoise.secure(conn, true, Opt.some(conn.peerId))
+      sconn = await clientNoise.secure(conn, Opt.some(conn.peerId))
 
     await conn.close()
     await handlerWait
@@ -178,9 +178,9 @@ suite "Noise":
     let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
     asyncSpawn transport1.start(server)
 
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
-      let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
+      let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
       defer:
         await sconn.close()
         await conn.close()
@@ -196,7 +196,7 @@ suite "Noise":
       clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.addrs[0])
-    let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
+    let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
 
     await sconn.write("Hello!")
     await acceptFut
@@ -221,9 +221,9 @@ suite "Noise":
       transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
       listenFut = transport1.start(server)
 
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
       let conn = await transport1.accept()
-      let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
+      let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
       defer:
         await sconn.close()
       let msg = await sconn.readLp(1024*1024)
@@ -237,7 +237,7 @@ suite "Noise":
       clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
       clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
       conn = await transport2.dial(transport1.addrs[0])
-    let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
+    let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
 
     await sconn.writeLp(hugePayload)
     await readTask

@@ -42,7 +42,7 @@ suite "Ping":
     transport1 = TcpTransport.new(upgrade = Upgrade())
     transport2 = TcpTransport.new(upgrade = Upgrade())
 
-    proc handlePing(peer: PeerId) {.async, gcsafe, closure.} =
+    proc handlePing(peer: PeerId) {.async, closure.} =
       inc pingReceivedCount
     pingProto1 = Ping.new()
     pingProto2 = Ping.new(handlePing)
@@ -63,7 +63,7 @@ suite "Ping":
   asyncTest "simple ping":
     msListen.addHandler(PingCodec, pingProto1)
     serverFut = transport1.start(@[ma])
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       await msListen.handle(c)
 
@@ -78,7 +78,7 @@ suite "Ping":
   asyncTest "ping callback":
     msDial.addHandler(PingCodec, pingProto2)
     serverFut = transport1.start(@[ma])
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       discard await msListen.select(c, PingCodec)
       discard await pingProto1.ping(c)
@@ -92,7 +92,7 @@ suite "Ping":
   asyncTest "bad ping data ack":
     type FakePing = ref object of LPProtocol
     let fakePingProto = FakePing()
-    proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+    proc fakeHandle(conn: Connection, proto: string) {.async, closure.} =
       var
         buf: array[32, byte]
         fakebuf: array[32, byte]
@@ -103,7 +103,7 @@ suite "Ping":
 
     msListen.addHandler(PingCodec, fakePingProto)
     serverFut = transport1.start(@[ma])
-    proc acceptHandler(): Future[void] {.async, gcsafe.} =
+    proc acceptHandler(): Future[void] {.async.} =
       let c = await transport1.accept()
       await msListen.handle(c)
 
|
|||||||
import std/times
|
import std/times
|
||||||
import stew/byteutils
|
import stew/byteutils
|
||||||
|
|
||||||
proc createSwitch(r: Relay): Switch =
|
proc createSwitch(r: Relay = nil, useYamux: bool = false): Switch =
|
||||||
result = SwitchBuilder.new()
|
var builder = SwitchBuilder.new()
|
||||||
.withRng(newRng())
|
.withRng(newRng())
|
||||||
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||||
.withTcpTransport()
|
.withTcpTransport()
|
||||||
.withMplex()
|
|
||||||
|
if useYamux:
|
||||||
|
builder = builder.withYamux()
|
||||||
|
else:
|
||||||
|
builder = builder.withMplex()
|
||||||
|
|
||||||
|
if r != nil:
|
||||||
|
builder = builder.withCircuitRelay(r)
|
||||||
|
|
||||||
|
return builder
|
||||||
.withNoise()
|
.withNoise()
|
||||||
.withCircuitRelay(r)
|
|
||||||
.build()
|
.build()
|
||||||
|
|
||||||
suite "Circuit Relay V2":
|
suite "Circuit Relay V2":
|
||||||
@ -122,308 +130,310 @@ suite "Circuit Relay V2":
|
|||||||
expect(ReservationError):
|
expect(ReservationError):
|
||||||
discard await cl1.reserve(src2.peerInfo.peerId, addrs)
|
discard await cl1.reserve(src2.peerInfo.peerId, addrs)
|
||||||
|
|
||||||
suite "Connection":
|
for (useYamux, muxName) in [(false, "Mplex"), (true, "Yamux")]:
|
||||||
asyncTeardown:
|
suite "Circuit Relay V2 Connection using " & muxName:
|
||||||
checkTrackers()
|
asyncTeardown:
|
||||||
var
|
checkTrackers()
|
||||||
customProtoCodec {.threadvar.}: string
|
|
||||||
proto {.threadvar.}: LPProtocol
|
|
||||||
ttl {.threadvar.}: int
|
|
||||||
ldur {.threadvar.}: uint32
|
|
||||||
ldata {.threadvar.}: uint64
|
|
||||||
srcCl {.threadvar.}: RelayClient
|
|
||||||
dstCl {.threadvar.}: RelayClient
|
|
||||||
rv2 {.threadvar.}: Relay
|
|
||||||
src {.threadvar.}: Switch
|
|
||||||
dst {.threadvar.}: Switch
|
|
||||||
rel {.threadvar.}: Switch
|
|
||||||
rsvp {.threadvar.}: Rsvp
|
|
||||||
conn {.threadvar.}: Connection
|
|
||||||
|
|
||||||
asyncSetup:
|
|
||||||
customProtoCodec = "/test"
|
|
||||||
proto = new LPProtocol
|
|
||||||
proto.codec = customProtoCodec
|
|
||||||
ttl = 60
|
|
||||||
ldur = 120
|
|
||||||
ldata = 16384
|
|
||||||
srcCl = RelayClient.new()
|
|
||||||
dstCl = RelayClient.new()
|
|
||||||
src = createSwitch(srcCl)
|
|
||||||
dst = createSwitch(dstCl)
|
|
||||||
rel = newStandardSwitch()
|
|
||||||
|
|
||||||
asyncTest "Connection succeed":
|
|
||||||
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
check: "test1" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test2")
|
|
||||||
check: "test3" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test4")
|
|
||||||
await conn.close()
|
|
||||||
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
|
||||||
limitDuration=ldur,
|
|
||||||
limitData=ldata)
|
|
||||||
rv2.setup(rel)
|
|
||||||
rel.mount(rv2)
|
|
||||||
dst.mount(proto)
|
|
||||||
|
|
||||||
await rel.start()
|
|
||||||
await src.start()
|
|
||||||
await dst.start()
|
|
||||||
|
|
||||||
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
|
|
||||||
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
|
||||||
await conn.writeLp("test1")
|
|
||||||
check: "test2" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test3")
|
|
||||||
check: "test4" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await allFutures(conn.close())
|
|
||||||
await allFutures(src.stop(), dst.stop(), rel.stop())
|
|
||||||
|
|
||||||
asyncTest "Connection duration exceeded":
|
|
||||||
ldur = 3
|
|
||||||
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("yeah!")
|
|
||||||
check "go!" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await sleepAsync(chronos.timer.seconds(ldur + 1))
|
|
||||||
await conn.writeLp("that was a cool power nap")
|
|
||||||
await conn.close()
|
|
||||||
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
|
||||||
limitDuration=ldur,
|
|
||||||
limitData=ldata)
|
|
||||||
rv2.setup(rel)
|
|
||||||
rel.mount(rv2)
|
|
||||||
dst.mount(proto)
|
|
||||||
|
|
||||||
await rel.start()
|
|
||||||
await src.start()
|
|
||||||
await dst.start()
|
|
||||||
|
|
||||||
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
|
|
||||||
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
|
||||||
await conn.writeLp("wanna sleep?")
|
|
||||||
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("go!")
|
|
||||||
expect(LPStreamEOFError):
|
|
||||||
discard await conn.readLp(1024)
|
|
||||||
await allFutures(conn.close())
|
|
||||||
await allFutures(src.stop(), dst.stop(), rel.stop())
|
|
||||||
|
|
||||||
asyncTest "Connection data exceeded":
|
|
||||||
ldata = 1000
|
|
||||||
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("do you expect a lorem ipsum or...?")
|
|
||||||
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
|
|
||||||
precisely--having little or no money in my purse, and nothing
|
|
||||||
particular to interest me on shore, I thought I would sail about a
|
|
||||||
little and see the watery part of the world. It is a way I have of
|
|
||||||
driving off the spleen and regulating the circulation. Whenever I
|
|
||||||
find myself growing grim about the mouth; whenever it is a damp,
|
|
||||||
drizzly November in my soul; whenever I find myself involuntarily
|
|
||||||
pausing before coffin warehouses, and bringing up the rear of every
|
|
||||||
funeral I meet; and especially whenever my hypos get such an upper
|
|
||||||
hand of me, that it requires a strong moral principle to prevent me
|
|
||||||
from deliberately stepping into the street, and methodically knocking
|
|
||||||
people's hats off--then, I account it high time to get to sea as soon
|
|
||||||
as I can. This is my substitute for pistol and ball. With a
|
|
||||||
philosophical flourish Cato throws himself upon his sword; I quietly
|
|
||||||
take to the ship.""")
|
|
||||||
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
|
||||||
limitDuration=ldur,
|
|
||||||
limitData=ldata)
|
|
||||||
rv2.setup(rel)
|
|
||||||
rel.mount(rv2)
|
|
||||||
dst.mount(proto)
|
|
||||||
|
|
||||||
await rel.start()
|
|
||||||
await src.start()
|
|
||||||
await dst.start()
|
|
||||||
|
|
||||||
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
|
|
||||||
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
|
||||||
await conn.writeLp("count me the better story you know")
|
|
||||||
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("surprise me!")
|
|
||||||
expect(LPStreamEOFError):
|
|
||||||
discard await conn.readLp(1024)
|
|
||||||
await allFutures(conn.close())
|
|
||||||
await allFutures(src.stop(), dst.stop(), rel.stop())
|
|
||||||
|
|
||||||
asyncTest "Reservation ttl expire during connection":
|
|
||||||
ttl = 3
|
|
||||||
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
check: "test1" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test2")
|
|
||||||
check: "test3" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test4")
|
|
||||||
await conn.close()
|
|
||||||
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
|
||||||
limitDuration=ldur,
|
|
||||||
limitData=ldata)
|
|
||||||
rv2.setup(rel)
|
|
||||||
rel.mount(rv2)
|
|
||||||
dst.mount(proto)
|
|
||||||
|
|
||||||
await rel.start()
|
|
||||||
await src.start()
|
|
||||||
await dst.start()
|
|
||||||
|
|
||||||
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
|
|
||||||
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
|
||||||
await conn.writeLp("test1")
|
|
||||||
check: "test2" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("test3")
|
|
||||||
check: "test4" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await src.disconnect(rel.peerInfo.peerId)
|
|
||||||
await sleepAsync(chronos.timer.seconds(ttl + 1))
|
|
||||||
|
|
||||||
expect(DialFailedError):
|
|
||||||
check: conn.atEof()
|
|
||||||
await conn.close()
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
|
||||||
await allFutures(conn.close())
|
|
||||||
await allFutures(src.stop(), dst.stop(), rel.stop())
|
|
||||||
|
|
||||||
asyncTest "Connection over relay":
|
|
||||||
# src => rel => rel2 => dst
|
|
||||||
# rel2 reserve rel
|
|
||||||
# dst reserve rel2
|
|
||||||
# src try to connect with dst
|
|
||||||
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
raise newException(CatchableError, "Should not be here")
|
|
||||||
let
|
|
||||||
rel2Cl = RelayClient.new(canHop = true)
|
|
||||||
rel2 = createSwitch(rel2Cl)
|
|
||||||
rv2 = Relay.new()
|
|
||||||
rv2.setup(rel)
|
|
||||||
rel.mount(rv2)
|
|
||||||
dst.mount(proto)
|
|
||||||
await rel.start()
|
|
||||||
await rel2.start()
|
|
||||||
await src.start()
|
|
||||||
await dst.start()
|
|
||||||
|
|
||||||
let
|
|
||||||
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
|
|
||||||
$rel2.peerInfo.peerId & "/p2p/" &
|
|
||||||
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
|
|
||||||
|
|
||||||
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
|
|
||||||
|
|
||||||
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
|
||||||
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
|
|
||||||
|
|
||||||
expect(DialFailedError):
|
|
||||||
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
|
|
||||||
await allFutures(conn.close())
|
|
||||||
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
|
|
||||||
|
|
||||||
asyncTest "Connection using ClientRelay":
|
|
||||||
var
|
var
|
||||||
protoABC = new LPProtocol
|
customProtoCodec {.threadvar.}: string
|
||||||
protoBCA = new LPProtocol
|
proto {.threadvar.}: LPProtocol
|
||||||
protoCAB = new LPProtocol
|
ttl {.threadvar.}: int
|
||||||
protoABC.codec = "/abctest"
|
ldur {.threadvar.}: uint32
|
||||||
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
|
ldata {.threadvar.}: uint64
|
||||||
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
|
srcCl {.threadvar.}: RelayClient
|
||||||
await conn.writeLp("testABC2")
|
dstCl {.threadvar.}: RelayClient
|
||||||
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
|
rv2 {.threadvar.}: Relay
|
||||||
await conn.writeLp("testABC4")
|
src {.threadvar.}: Switch
|
||||||
await conn.close()
|
dst {.threadvar.}: Switch
|
||||||
protoBCA.codec = "/bcatest"
|
rel {.threadvar.}: Switch
|
||||||
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
|
rsvp {.threadvar.}: Rsvp
|
||||||
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
|
conn {.threadvar.}: Connection
|
||||||
await conn.writeLp("testBCA2")
|
|
||||||
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("testBCA4")
|
|
||||||
await conn.close()
|
|
||||||
protoCAB.codec = "/cabtest"
|
|
||||||
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
|
|
||||||
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("testCAB2")
|
|
||||||
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
|
|
||||||
await conn.writeLp("testCAB4")
|
|
||||||
await conn.close()
|
|
||||||
|
|
||||||
let
|
asyncSetup:
|
||||||
clientA = RelayClient.new(canHop = true)
|
customProtoCodec = "/test"
|
||||||
clientB = RelayClient.new(canHop = true)
|
proto = new LPProtocol
|
||||||
clientC = RelayClient.new(canHop = true)
|
proto.codec = customProtoCodec
|
||||||
switchA = createSwitch(clientA)
|
ttl = 60
|
||||||
switchB = createSwitch(clientB)
|
ldur = 120
|
||||||
switchC = createSwitch(clientC)
|
ldata = 16384
|
||||||
|
srcCl = RelayClient.new()
|
||||||
|
dstCl = RelayClient.new()
|
||||||
|
src = createSwitch(srcCl, useYamux)
|
||||||
|
dst = createSwitch(dstCl, useYamux)
|
||||||
|
rel = createSwitch(nil, useYamux)
|
||||||
|
|
||||||
switchA.mount(protoBCA)
|
asyncTest "Connection succeed":
|
||||||
switchB.mount(protoCAB)
|
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
switchC.mount(protoABC)
|
check: "test1" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("test2")
|
||||||
|
check: "test3" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("test4")
|
||||||
|
await conn.close()
|
||||||
|
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
||||||
|
limitDuration=ldur,
|
||||||
|
limitData=ldata)
|
||||||
|
rv2.setup(rel)
|
||||||
|
rel.mount(rv2)
|
||||||
|
dst.mount(proto)
|
||||||
|
|
||||||
await switchA.start()
|
await rel.start()
|
||||||
await switchB.start()
|
await src.start()
|
||||||
await switchC.start()
|
await dst.start()
|
||||||
|
|
||||||
let
|
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
|
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
$switchB.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$switchC.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$switchA.peerInfo.peerId & "/p2p-circuit").get()
|
|
||||||
|
|
||||||
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
|
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
|
|
||||||
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
|
|
||||||
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
|
|
||||||
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
|
|
||||||
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
|
|
||||||
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
|
|
||||||
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
|
|
||||||
|
|
||||||
await connABC.writeLp("testABC1")
|
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
await connBCA.writeLp("testBCA1")
|
|
||||||
await connCAB.writeLp("testCAB1")
|
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
||||||
check:
|
await conn.writeLp("test1")
|
||||||
"testABC2" == string.fromBytes(await connABC.readLp(1024))
|
check: "test2" == string.fromBytes(await conn.readLp(1024))
|
||||||
"testBCA2" == string.fromBytes(await connBCA.readLp(1024))
|
await conn.writeLp("test3")
|
||||||
"testCAB2" == string.fromBytes(await connCAB.readLp(1024))
|
check: "test4" == string.fromBytes(await conn.readLp(1024))
|
||||||
await connABC.writeLp("testABC3")
|
await allFutures(conn.close())
|
||||||
await connBCA.writeLp("testBCA3")
|
await allFutures(src.stop(), dst.stop(), rel.stop())
|
||||||
await connCAB.writeLp("testCAB3")
|
|
||||||
check:
|
asyncTest "Connection duration exceeded":
|
||||||
"testABC4" == string.fromBytes(await connABC.readLp(1024))
|
ldur = 3
|
||||||
"testBCA4" == string.fromBytes(await connBCA.readLp(1024))
|
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
"testCAB4" == string.fromBytes(await connCAB.readLp(1024))
|
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
|
||||||
await allFutures(connABC.close(), connBCA.close(), connCAB.close())
|
await conn.writeLp("yeah!")
|
||||||
await allFutures(switchA.stop(), switchB.stop(), switchC.stop())
|
check "go!" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await sleepAsync(chronos.timer.seconds(ldur + 1))
|
||||||
|
await conn.writeLp("that was a cool power nap")
|
||||||
|
await conn.close()
|
||||||
|
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
||||||
|
limitDuration=ldur,
|
||||||
|
limitData=ldata)
|
||||||
|
rv2.setup(rel)
|
||||||
|
rel.mount(rv2)
|
||||||
|
dst.mount(proto)
|
||||||
|
|
||||||
|
await rel.start()
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
|
||||||
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
|
||||||
|
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
||||||
|
await conn.writeLp("wanna sleep?")
|
||||||
|
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("go!")
|
||||||
|
expect(LPStreamEOFError):
|
||||||
|
discard await conn.readLp(1024)
|
||||||
|
await allFutures(conn.close())
|
||||||
|
await allFutures(src.stop(), dst.stop(), rel.stop())
|
||||||
|
|
||||||
|
asyncTest "Connection data exceeded":
|
||||||
|
ldata = 1000
|
||||||
|
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("do you expect a lorem ipsum or...?")
|
||||||
|
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
|
||||||
|
precisely--having little or no money in my purse, and nothing
|
||||||
|
particular to interest me on shore, I thought I would sail about a
|
||||||
|
little and see the watery part of the world. It is a way I have of
|
||||||
|
driving off the spleen and regulating the circulation. Whenever I
|
||||||
|
find myself growing grim about the mouth; whenever it is a damp,
|
||||||
|
drizzly November in my soul; whenever I find myself involuntarily
|
||||||
|
pausing before coffin warehouses, and bringing up the rear of every
|
||||||
|
funeral I meet; and especially whenever my hypos get such an upper
|
||||||
|
hand of me, that it requires a strong moral principle to prevent me
|
||||||
|
from deliberately stepping into the street, and methodically knocking
|
||||||
|
people's hats off--then, I account it high time to get to sea as soon
|
||||||
|
as I can. This is my substitute for pistol and ball. With a
|
||||||
|
philosophical flourish Cato throws himself upon his sword; I quietly
|
||||||
|
take to the ship.""")
|
||||||
|
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
||||||
|
limitDuration=ldur,
|
||||||
|
limitData=ldata)
|
||||||
|
rv2.setup(rel)
|
||||||
|
rel.mount(rv2)
|
||||||
|
dst.mount(proto)
|
||||||
|
|
||||||
|
await rel.start()
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
|
||||||
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
|
||||||
|
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
||||||
|
await conn.writeLp("count me the better story you know")
|
||||||
|
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("surprise me!")
|
||||||
|
expect(LPStreamEOFError):
|
||||||
|
discard await conn.readLp(1024)
|
||||||
|
await allFutures(conn.close())
|
||||||
|
await allFutures(src.stop(), dst.stop(), rel.stop())
|
||||||
|
|
||||||
|
asyncTest "Reservation ttl expire during connection":
|
||||||
|
ttl = 3
|
||||||
|
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
check: "test1" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("test2")
|
||||||
|
check: "test3" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("test4")
|
||||||
|
await conn.close()
|
||||||
|
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
|
||||||
|
limitDuration=ldur,
|
||||||
|
limitData=ldata)
|
||||||
|
rv2.setup(rel)
|
||||||
|
rel.mount(rv2)
|
||||||
|
dst.mount(proto)
|
||||||
|
|
||||||
|
await rel.start()
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$rel.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
|
||||||
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
|
||||||
|
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
||||||
|
await conn.writeLp("test1")
|
||||||
|
check: "test2" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("test3")
|
||||||
|
check: "test4" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await src.disconnect(rel.peerInfo.peerId)
|
||||||
|
await sleepAsync(chronos.timer.seconds(ttl + 1))
|
||||||
|
|
||||||
|
expect(DialFailedError):
|
||||||
|
check: conn.atEof()
|
||||||
|
await conn.close()
|
||||||
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
|
||||||
|
await allFutures(conn.close())
|
||||||
|
await allFutures(src.stop(), dst.stop(), rel.stop())
|
||||||
|
|
||||||
|
asyncTest "Connection over relay":
|
||||||
|
# src => rel => rel2 => dst
|
||||||
|
# rel2 reserve rel
|
||||||
|
# dst reserve rel2
|
||||||
|
# src try to connect with dst
|
||||||
|
proto.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
raise newException(CatchableError, "Should not be here")
|
||||||
|
let
|
||||||
|
rel2Cl = RelayClient.new(canHop = true)
|
||||||
|
rel2 = createSwitch(rel2Cl, useYamux)
|
||||||
|
rv2 = Relay.new()
|
||||||
|
rv2.setup(rel)
|
||||||
|
rel.mount(rv2)
|
||||||
|
dst.mount(proto)
|
||||||
|
await rel.start()
|
||||||
|
await rel2.start()
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
let
|
||||||
|
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
|
||||||
|
$rel2.peerInfo.peerId & "/p2p/" &
|
||||||
|
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
|
||||||
|
|
||||||
|
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
|
||||||
|
|
||||||
|
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
|
||||||
|
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
|
||||||
|
|
||||||
|
expect(DialFailedError):
|
||||||
|
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
|
||||||
|
if not conn.isNil():
|
||||||
|
await allFutures(conn.close())
|
||||||
|
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
|
||||||
|
|
||||||
|
asyncTest "Connection using ClientRelay":
|
||||||
|
var
|
||||||
|
protoABC = new LPProtocol
|
||||||
|
protoBCA = new LPProtocol
|
||||||
|
protoCAB = new LPProtocol
|
||||||
|
protoABC.codec = "/abctest"
|
||||||
|
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testABC2")
|
||||||
|
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testABC4")
|
||||||
|
await conn.close()
|
||||||
|
protoBCA.codec = "/bcatest"
|
||||||
|
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testBCA2")
|
||||||
|
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testBCA4")
|
||||||
|
await conn.close()
|
||||||
|
protoCAB.codec = "/cabtest"
|
||||||
|
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
|
||||||
|
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testCAB2")
|
||||||
|
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
|
||||||
|
await conn.writeLp("testCAB4")
|
||||||
|
await conn.close()
|
||||||
|
|
||||||
|
let
|
||||||
|
clientA = RelayClient.new(canHop = true)
|
||||||
|
clientB = RelayClient.new(canHop = true)
|
||||||
|
clientC = RelayClient.new(canHop = true)
|
||||||
|
switchA = createSwitch(clientA, useYamux)
|
||||||
|
switchB = createSwitch(clientB, useYamux)
|
||||||
|
switchC = createSwitch(clientC, useYamux)
|
||||||
|
|
||||||
|
switchA.mount(protoBCA)
|
||||||
|
switchB.mount(protoCAB)
|
||||||
|
switchC.mount(protoABC)
|
||||||
|
|
||||||
|
await switchA.start()
|
||||||
|
await switchB.start()
|
||||||
|
await switchC.start()
|
||||||
|
|
||||||
|
let
|
||||||
|
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$switchB.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$switchC.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$switchA.peerInfo.peerId & "/p2p-circuit").get()
|
||||||
|
|
||||||
|
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
|
||||||
|
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
|
||||||
|
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
|
||||||
|
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
|
||||||
|
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
|
||||||
|
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
|
||||||
|
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
|
||||||
|
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
|
||||||
|
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
|
||||||
|
|
    await connABC.writeLp("testABC1")
    await connBCA.writeLp("testBCA1")
    await connCAB.writeLp("testCAB1")
    check:
      "testABC2" == string.fromBytes(await connABC.readLp(1024))
      "testBCA2" == string.fromBytes(await connBCA.readLp(1024))
      "testCAB2" == string.fromBytes(await connCAB.readLp(1024))
    await connABC.writeLp("testABC3")
    await connBCA.writeLp("testBCA3")
    await connCAB.writeLp("testCAB3")
    check:
      "testABC4" == string.fromBytes(await connABC.readLp(1024))
      "testBCA4" == string.fromBytes(await connBCA.readLp(1024))
      "testCAB4" == string.fromBytes(await connCAB.readLp(1024))
    await allFutures(connABC.close(), connBCA.close(), connCAB.close())
    await allFutures(switchA.stop(), switchB.stop(), switchC.stop())

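For reference, a minimal sketch of the same flow with a single dedicated relay instead of a ring (not part of this diff; it assumes the createSwitch and useYamux helpers defined earlier in this test file, and "/some/proto" stands in for a real codec):

    let
      dstClient = RelayClient.new()
      rel = createSwitch(RelayClient.new(canHop = true), useYamux)
      dst = createSwitch(dstClient, useYamux)
      src = createSwitch(RelayClient.new(), useYamux)
    await allFutures(rel.start(), dst.start(), src.start())
    # the destination connects to the relay and reserves a slot on it...
    await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
    discard await dstClient.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
    # ...then the source can reach it through the relay's circuit address
    let circuitAddr = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
      $rel.peerInfo.peerId & "/p2p-circuit").get()
    let conn = await src.dial(dst.peerInfo.peerId, @[ circuitAddr ], "/some/proto")
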
@ -46,7 +46,7 @@ suite "Switch":

  asyncTest "e2e use switch dial proto string":
    let done = newFuture[void]()
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -86,7 +86,7 @@ suite "Switch":

  asyncTest "e2e use switch dial proto string with custom matcher":
    let done = newFuture[void]()
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -131,7 +131,7 @@ suite "Switch":

  asyncTest "e2e should not leak bufferstreams and connections on channel close":
    let done = newFuture[void]()
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -171,7 +171,7 @@ suite "Switch":
    check not switch2.isConnected(switch1.peerInfo.peerId)

  asyncTest "e2e use connect then dial":
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -305,7 +305,7 @@ suite "Switch":

    var step = 0
    var kinds: set[ConnEventKind]
-    proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+    proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
      kinds = kinds + {event.kind}
      case step:
      of 0:
@ -357,7 +357,7 @@ suite "Switch":

    var step = 0
    var kinds: set[ConnEventKind]
-    proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+    proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
      kinds = kinds + {event.kind}
      case step:
      of 0:
@ -409,7 +409,7 @@ suite "Switch":

    var step = 0
    var kinds: set[PeerEventKind]
-    proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+    proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
      kinds = kinds + {event.kind}
      case step:
      of 0:
@ -460,7 +460,7 @@ suite "Switch":

    var step = 0
    var kinds: set[PeerEventKind]
-    proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+    proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
      kinds = kinds + {event.kind}
      case step:
      of 0:
@ -521,7 +521,7 @@ suite "Switch":

    var step = 0
    var kinds: set[PeerEventKind]
-    proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+    proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
      kinds = kinds + {event.kind}
      case step:
      of 0:
@ -581,7 +581,7 @@ suite "Switch":
    var switches: seq[Switch]
    var done = newFuture[void]()
    var onConnect: Future[void]
-    proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+    proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
      case event.kind:
      of ConnEventKind.Connected:
        await onConnect
@ -619,7 +619,7 @@ suite "Switch":
    var switches: seq[Switch]
    var done = newFuture[void]()
    var onConnect: Future[void]
-    proc hook(peerId2: PeerId, event: ConnEvent) {.async, gcsafe.} =
+    proc hook(peerId2: PeerId, event: ConnEvent) {.async.} =
      case event.kind:
      of ConnEventKind.Connected:
        if conns == 5:
@ -662,7 +662,7 @@ suite "Switch":
    let transport = TcpTransport.new(upgrade = Upgrade())
    await transport.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
      let conn = await transport.accept()
      await conn.closeWithEOF()

@ -686,7 +686,7 @@ suite "Switch":
      switch.stop())

  asyncTest "e2e calling closeWithEOF on the same stream should not assert":
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      discard await conn.readLp(100)

    let testProto = new TestProto
@ -832,7 +832,7 @@ suite "Switch":

  asyncTest "e2e peer store":
    let done = newFuture[void]()
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -882,7 +882,7 @@ suite "Switch":
      # this randomly locks the Windows CI job
      skip()
      return
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      try:
        let msg = string.fromBytes(await conn.readLp(1024))
        check "Hello!" == msg
@ -1019,7 +1019,7 @@ suite "Switch":
    await srcTcpSwitch.stop()

  asyncTest "mount unstarted protocol":
-    proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+    proc handle(conn: Connection, proto: string) {.async.} =
      check "test123" == string.fromBytes(await conn.readLp(1024))
      await conn.writeLp("test456")
      await conn.close()
@ -30,7 +30,7 @@ suite "TCP transport":
    let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
    asyncSpawn transport.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
      let conn = await transport.accept()
      await conn.write("Hello!")
      await conn.close()
@ -52,7 +52,7 @@ suite "TCP transport":
    let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
    asyncSpawn transport.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
      var msg = newSeq[byte](6)
      let conn = await transport.accept()
      await conn.readExactly(addr msg[0], 6)
@ -73,7 +73,7 @@ suite "TCP transport":
    let address = initTAddress("0.0.0.0:0")
    let handlerWait = newFuture[void]()
    proc serveClient(server: StreamServer,
-                     transp: StreamTransport) {.async, gcsafe.} =
+                     transp: StreamTransport) {.async.} =
      var wstream = newAsyncStreamWriter(transp)
      await wstream.write("Hello!")
      await wstream.finish()
@ -106,7 +106,7 @@ suite "TCP transport":
    let address = initTAddress("0.0.0.0:0")
    let handlerWait = newFuture[void]()
    proc serveClient(server: StreamServer,
-                     transp: StreamTransport) {.async, gcsafe.} =
+                     transp: StreamTransport) {.async.} =
      var rstream = newAsyncStreamReader(transp)
      let msg = await rstream.read(6)
      check string.fromBytes(msg) == "Hello!"
@ -179,7 +179,7 @@ suite "TCP transport":
    let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade(), connectionsTimeout=1.milliseconds)
    asyncSpawn transport.start(ma)

-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
      let conn = await transport.accept()
      await conn.join()

@ -56,7 +56,7 @@ suite "Tor transport":
      check string.fromBytes(resp) == "server"
      await client.stop()

-    proc serverAcceptHandler() {.async, gcsafe.} =
+    proc serverAcceptHandler() {.async.} =
      let conn = await server.accept()
      var resp: array[6, byte]
      await conn.readExactly(addr resp, 6)
|
|||||||
proc new(T: typedesc[TestProto]): T =
|
proc new(T: typedesc[TestProto]): T =
|
||||||
|
|
||||||
# every incoming connections will be in handled in this closure
|
# every incoming connections will be in handled in this closure
|
||||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
proc handle(conn: Connection, proto: string) {.async.} =
|
||||||
|
|
||||||
var resp: array[6, byte]
|
var resp: array[6, byte]
|
||||||
await conn.readExactly(addr resp, 6)
|
await conn.readExactly(addr resp, 6)
|
||||||
|
@ -89,7 +89,7 @@ suite "WebSocket transport":
    const correctPattern = mapAnd(TCP, mapEq("wss"))
    await transport1.start(ma)
    check correctPattern.match(transport1.addrs[0])
-    proc acceptHandler() {.async, gcsafe.} =
+    proc acceptHandler() {.async.} =
      while true:
        let conn = await transport1.accept()
        if not isNil(conn):
@ -22,11 +22,12 @@ suite "Yamux":
  teardown:
    checkTrackers()

-  template mSetup {.inject.} =
+  template mSetup(ws: int = YamuxDefaultWindowSize) {.inject.} =
    #TODO in a template to avoid threadvar
    let
      (conna {.inject.}, connb {.inject.}) = bridgedConnections()
-      (yamuxa {.inject.}, yamuxb {.inject.}) = (Yamux.new(conna), Yamux.new(connb))
+      yamuxa {.inject.} = Yamux.new(conna, windowSize = ws)
+      yamuxb {.inject.} = Yamux.new(connb, windowSize = ws)
      (handlera, handlerb) = (yamuxa.handle(), yamuxb.handle())

    defer:
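The mSetup template now takes the window size as a parameter, defaulting to YamuxDefaultWindowSize, so individual tests can exercise non-default flow-control windows; the two new tests below call it as mSetup(512000) and mSetup(64000).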
@ -179,6 +180,63 @@ suite "Yamux":
    writerBlocker.complete()
    await streamA.close()

+  asyncTest "Increase window size":
+    mSetup(512000)
+    let readerBlocker = newFuture[void]()
+    yamuxb.streamHandler = proc(conn: Connection) {.async.} =
+      await readerBlocker
+      var buffer: array[260000, byte]
+      discard await conn.readOnce(addr buffer[0], 260000)
+      await conn.close()
+
+    let streamA = await yamuxa.newStream()
+    check streamA == yamuxa.getStreams()[0]
+
+    await wait(streamA.write(newSeq[byte](512000)), 1.seconds) # shouldn't block
+
+    let secondWriter = streamA.write(newSeq[byte](10000))
+    await sleepAsync(10.milliseconds)
+    check: not secondWriter.finished()
+
+    readerBlocker.complete()
+    await wait(secondWriter, 1.seconds)
+
+    await streamA.close()
+
+  asyncTest "Reduce window size":
+    mSetup(64000)
+    let readerBlocker1 = newFuture[void]()
+    let readerBlocker2 = newFuture[void]()
+    yamuxb.streamHandler = proc(conn: Connection) {.async.} =
+      await readerBlocker1
+      var buffer: array[256000, byte]
+      # For the first roundtrip, the send window size is assumed to be 256k
+      discard await conn.readOnce(addr buffer[0], 256000)
+      await readerBlocker2
+      discard await conn.readOnce(addr buffer[0], 40000)
+
+      await conn.close()
+
+    let streamA = await yamuxa.newStream()
+    check streamA == yamuxa.getStreams()[0]
+
+    await wait(streamA.write(newSeq[byte](256000)), 1.seconds) # shouldn't block
+
+    let secondWriter = streamA.write(newSeq[byte](64000))
+    await sleepAsync(10.milliseconds)
+    check: not secondWriter.finished()
+
+    readerBlocker1.complete()
+    await wait(secondWriter, 1.seconds)
+
+    let thirdWriter = streamA.write(newSeq[byte](10))
+    await sleepAsync(10.milliseconds)
+    check: not thirdWriter.finished()
+
+    readerBlocker2.complete()
+    await wait(thirdWriter, 1.seconds)
+    await streamA.close()
+
  suite "Exception testing":
    asyncTest "Local & Remote close":
      mSetup()
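Both new tests exercise Yamux per-stream flow control: a writer may have at most one receive window's worth of unacknowledged bytes in flight, and further writes park until the peer consumes data and a window update flows back. A minimal sketch of that invariant using the same mSetup template (hypothetical test, not part of this diff):

    asyncTest "window backpressure (sketch)":
      mSetup(16384)                        # 16 KiB receive window
      let readerBlocker = newFuture[void]()
      yamuxb.streamHandler = proc(conn: Connection) {.async.} =
        await readerBlocker
        var buffer: array[16384, byte]
        discard await conn.readOnce(addr buffer[0], 16384)
        await conn.close()
      let streamA = await yamuxa.newStream()
      await streamA.write(newSeq[byte](16384))      # exactly fills the peer's window
      let blocked = streamA.write(newSeq[byte](1))  # parks: the window is exhausted
      readerBlocker.complete()                      # reader drains, update comes back
      await wait(blocked, 1.seconds)
      await streamA.close()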