Merge branch 'master' into quic

# Conflicts:
#	libp2p.nimble

commit 6681116716
@@ -61,7 +61,7 @@ runs:
       - name: Restore Nim DLLs dependencies (Windows) from cache
         if: inputs.os == 'Windows'
         id: windows-dlls-cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: external/dlls
           key: 'dlls'
@@ -114,7 +114,7 @@ runs:
       - name: Restore Nim from cache
         id: nim-cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: '${{ github.workspace }}/nim'
           key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
@@ -1,9 +1,9 @@
-name: CI
+name: Continuous Integration

 on:
   push:
     branches:
       - master
       - unstable
   pull_request:
   workflow_dispatch:
@@ -12,60 +12,67 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  build:
+  test:
     timeout-minutes: 90
     strategy:
       fail-fast: false
       matrix:
-        target:
+        platform:
           - os: linux
             cpu: amd64
           - os: linux
             cpu: i386
+          - os: linux-gcc-14
+            cpu: amd64
           - os: macos
             cpu: amd64
           - os: windows
             cpu: amd64
           #- os: windows
           #cpu: i386
-        branch: [version-1-6]
+        nim:
+          - branch: version-1-6
+            memory_management: refc
+          - branch: version-2-0
+            memory_management: refc
         include:
-          - target:
+          - platform:
               os: linux
-            builder: ubuntu-20.04
+            builder: ubuntu-22.04
             shell: bash
-          - target:
+          - platform:
+              os: linux-gcc-14
+            builder: ubuntu-24.04
+            shell: bash
+          - platform:
               os: macos
-            builder: macos-12
+            builder: macos-13
             shell: bash
-          - target:
+          - platform:
               os: windows
-            builder: windows-2019
+            builder: windows-2022
             shell: msys2 {0}

     defaults:
       run:
         shell: ${{ matrix.shell }}

-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
+    name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.branch }})'
     runs-on: ${{ matrix.builder }}
     continue-on-error: ${{ matrix.branch == 'devel' }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           submodules: true

       - name: Setup Nim
         uses: "./.github/actions/install_nim"
         with:
-          os: ${{ matrix.target.os }}
-          cpu: ${{ matrix.target.cpu }}
+          os: ${{ matrix.platform.os }}
+          cpu: ${{ matrix.platform.cpu }}
           shell: ${{ matrix.shell }}
-          nim_branch: ${{ matrix.branch }}
+          nim_branch: ${{ matrix.nim.branch }}

       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
           go-version: '~1.15.5'
@@ -78,37 +85,28 @@ jobs:
         uses: actions/cache@v3
         with:
           path: nimbledeps
-          key: nimbledeps-${{ hashFiles('.pinned') }}
+          # Using nim.branch as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
+          # The change happened on Nimble v0.14.0.
+          key: nimbledeps-${{ matrix.nim.branch }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows

       - name: Install deps
         if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
         run: |
           nimble install_pinned

+      - name: Use gcc 14
+        if : ${{ matrix.platform.os == 'linux-gcc-14'}}
+        run: |
+          # Add GCC-14 to alternatives
+          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-14 14
+
+          # Set GCC-14 as the default
+          sudo update-alternatives --set gcc /usr/bin/gcc-14
+
       - name: Run tests
         run: |
           nim --version
           nimble --version
+          gcc --version
+          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
           nimble test
-
-  lint:
-    name: "Lint"
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
-
-      - name: Check nph formatting
-        # Pin nph to a specific version to avoid sudden style differences.
-        # Updating nph version should be accompanied with running the new
-        # version on the fluffy directory.
-        run: |
-          VERSION="v0.5.1"
-          ARCHIVE="nph-linux_x64.tar.gz"
-          curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
-          tar -xzf ${ARCHIVE}
-          shopt -s extglob # Enable extended globbing
-          ./nph examples libp2p tests tools *.@(nim|nims|nimble)
-          git diff --exit-code
@@ -1,11 +1,10 @@
-name: nim-libp2p codecov builds
+name: Coverage

 on:
-  #On push to common branches, this computes the "bases stats" for PRs
+  # On push to common branches, this computes the coverage that PRs will use for diff
   push:
     branches:
       - master
       - unstable
   pull_request:
   workflow_dispatch:
@@ -14,12 +13,13 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  Coverage:
-    runs-on: ubuntu-20.04
+  codecov:
+    name: Run coverage and upload to codecov
+    runs-on: ubuntu-22.04
     env:
       CICOV: YES
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
@@ -32,7 +32,7 @@ jobs:
       - name: Restore deps from cache
         id: deps-cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: nimbledeps
           key: nimbledeps-${{ hashFiles('.pinned') }}
@@ -42,24 +42,28 @@ jobs:
         run: |
           nimble install_pinned

-      - name: Run
+      - name: Setup coverage
         run: |
           sudo apt-get update
           sudo apt-get install -y lcov build-essential git curl
           mkdir coverage
+
+      - name: Run test suite with coverage flags
+        run: |
           export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
           nimble testnative
           nimble testpubsub
           nimble testfilter
+
+      - name: Run coverage
+        run: |
           find nimcache -name *.c -delete
           lcov --capture --directory nimcache --output-file coverage/coverage.info
           shopt -s globstar
           ls `pwd`/libp2p/{*,**/*}.nim
           lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
           genhtml coverage/coverage.f.info --output-directory coverage/output
-          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"

-      #- uses: actions/upload-artifact@master
-      #  with:
-      #    name: coverage
-      #    path: coverage
+      - name: Upload coverage to codecov
+        run: |
+          bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
@@ -1,12 +0,0 @@
-name: Daily
-on:
-  schedule:
-    - cron: "30 6 * * *"
-  workflow_dispatch:
-
-jobs:
-  call-multi-nim-common:
-    uses: ./.github/workflows/daily_common.yml
-    with:
-      nim-branch: "['version-1-6','version-2-0']"
-      cpu: "['amd64']"
@@ -0,0 +1,14 @@
+name: Daily amd64
+
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  test_amd64:
+    name: Daily amd64
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}]"
+      cpu: "['amd64']"
@@ -1,12 +1,13 @@
-name: daily-common
+name: Daily Common
+# Serves as base workflow for daily tasks, it's not run by itself.

 on:
   workflow_call:
     inputs:
-      nim-branch:
-        description: 'Nim branch'
+      nim:
+        description: 'Nim Configuration'
         required: true
-        type: string
+        type: string # Following this format: [{"branch": ..., "memory_management": ...}, ...]
       cpu:
         description: 'CPU'
         required: true
@@ -17,29 +18,34 @@ on:
         type: string
         default: "[]"

 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
   cancel-in-progress: true

 jobs:
-  delete-cache:
+  delete_cache:
     name: Delete github action's branch cache
     runs-on: ubuntu-latest
     steps:
       - uses: snnaplab/delete-branch-cache-action@v1

-  build:
-    needs: delete-cache
-    timeout-minutes: 120
+  test:
+    needs: delete_cache
+    timeout-minutes: 90
     strategy:
       fail-fast: false
       matrix:
         platform:
           - os: linux
-            builder: ubuntu-20
+            builder: ubuntu-22.04
             shell: bash
           - os: macos
-            builder: macos-12
+            builder: macos-13
             shell: bash
           - os: windows
-            builder: windows-2019
+            builder: windows-2022
             shell: msys2 {0}
-        branch: ${{ fromJSON(inputs.nim-branch) }}
+        nim: ${{ fromJSON(inputs.nim) }}
         cpu: ${{ fromJSON(inputs.cpu) }}
         exclude: ${{ fromJSON(inputs.exclude) }}
@@ -47,9 +53,9 @@ jobs:
       run:
         shell: ${{ matrix.platform.shell }}

-    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
+    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.branch }})'
     runs-on: ${{ matrix.platform.builder }}
-    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
+    continue-on-error: ${{ matrix.nim.branch == 'devel' || matrix.nim.branch == 'version-2-0' }}
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -59,11 +65,11 @@ jobs:
         with:
           os: ${{ matrix.platform.os }}
           shell: ${{ matrix.platform.shell }}
-          nim_branch: ${{ matrix.branch }}
+          nim_branch: ${{ matrix.nim.branch }}
           cpu: ${{ matrix.cpu }}

       - name: Setup Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version: '~1.15.5'
           cache: false
@@ -71,14 +77,13 @@ jobs:
       - name: Install p2pd
         run: |
           V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

+      - name: Install dependencies
+        run: |
+          nimble install -y --depsOnly
+
       - name: Run tests
         run: |
           nim --version
           nimble --version
-          nimble install -y --depsOnly
-          NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
-          if [[ "${{ matrix.branch }}" == "devel" ]]; then
-            echo -e "\nTesting with '--mm:orc':\n"
-            NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
-          fi
+          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}" nimble test
@@ -1,12 +1,14 @@
 name: Daily Nim Devel
+
 on:
   schedule:
     - cron: "30 6 * * *"
   workflow_dispatch:

 jobs:
-  call-multi-nim-common:
+  test_nim_devel:
+    name: Daily Nim Devel
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim-branch: "['devel']"
+      nim: "[{'branch': 'devel', 'memory_management': 'orc'}]"
       cpu: "['amd64']"
@@ -1,13 +1,15 @@
 name: Daily i386
+
 on:
   schedule:
     - cron: "30 6 * * *"
   workflow_dispatch:

 jobs:
-  call-multi-nim-common:
+  test_i386:
+    name: Daily i386 (Linux)
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim-branch: "['version-1-6','version-2-0', 'devel']"
+      nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}, {'branch': 'devel', 'memory_management': 'orc'}]"
       cpu: "['i386']"
       exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
@@ -1,4 +1,5 @@
-name: Bumper
+name: Dependencies

 on:
   push:
     branches:
@@ -6,34 +7,38 @@ on:
   workflow_dispatch:

 jobs:
-  bumpProjects:
+  bumper:
+    # Pushes new refs to interested external repositories, so they can do early testing against libp2p's newer versions
     runs-on: ubuntu-latest
+    name: Bump libp2p's version for ${{ matrix.target.repository }}:${{ matrix.target.ref }}
     strategy:
       fail-fast: false
       matrix:
-        target: [
-          { repo: status-im/nimbus-eth2, branch: unstable },
-          { repo: waku-org/nwaku, branch: master },
-          { repo: codex-storage/nim-codex, branch: master }
-        ]
+        target:
+          - repository: status-im/nimbus-eth2
+            ref: unstable
+          - repository: waku-org/nwaku
+            ref: master
+          - repository: codex-storage/nim-codex
+            ref: master
     steps:
-      - name: Clone repo
-        uses: actions/checkout@v2
+      - name: Clone target repository
+        uses: actions/checkout@v4
         with:
-          repository: ${{ matrix.target.repo }}
-          ref: ${{ matrix.target.branch }}
+          repository: ${{ matrix.target.repository }}
+          ref: ${{ matrix.target.ref}}
           path: nbc
           fetch-depth: 0
-          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
+          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

-      - name: Checkout this ref
+      - name: Checkout this ref in target repository
        run: |
          cd nbc
          git submodule update --init vendor/nim-libp2p
          cd vendor/nim-libp2p
          git checkout $GITHUB_SHA

-      - name: Commit this bump
+      - name: Push this ref to target repository
        run: |
          cd nbc
          git config --global user.email "${{ github.actor }}@users.noreply.github.com"
@@ -42,3 +47,4 @@ jobs:
         git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
         git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
         git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
@@ -1,21 +1,21 @@
-name: Docgen
+name: Documentation Generation And Publishing

 on:
   push:
     branches:
       - master
   workflow_dispatch:

 jobs:
   build:
     timeout-minutes: 20

     name: 'Generate & upload documentation'
-    runs-on: 'ubuntu-20.04'
+    runs-on: ubuntu-latest
     continue-on-error: true
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           submodules: true
@@ -35,7 +35,7 @@ jobs:
           ls ${GITHUB_REF##*/}

       - name: Clone the gh-pages branch
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           repository: vacp2p/nim-libp2p
           ref: gh-pages
@@ -66,7 +66,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - uses: actions/setup-python@v2
         with:
@@ -80,7 +80,7 @@ jobs:
         run: pip install mkdocs-material && nimble -y website

       - name: Clone the gh-pages branch
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           repository: vacp2p/nim-libp2p
           ref: gh-pages
@@ -1,9 +1,10 @@
-name: Interoperability Testing
+name: Interoperability Tests

 on:
   pull_request:
   push:
     branches:
       - unstable
+      - master
   workflow_dispatch:

 concurrency:
@@ -15,6 +16,12 @@ jobs:
     name: Run transport interoperability tests
     runs-on: ubuntu-22.04
     steps:
+      - name: Free Disk Space (Ubuntu)
+        # For some reason the original job (libp2p/test-plans) has enough disk space, but this one doesn't.
+        uses: jlumbroso/free-disk-space@v1.3.1
+        with:
+          tool-cache: true
+
       - uses: actions/checkout@v4
       - uses: docker/setup-buildx-action@v3
       - name: Build image
@@ -0,0 +1,32 @@
+name: Linters
+
+on:
+  pull_request:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  nph:
+    name: NPH
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
+
+      - name: Setup NPH
+        # Pin nph to a specific version to avoid sudden style differences.
+        # Updating nph version should be accompanied with running the new version on the fluffy directory.
+        run: |
+          VERSION="v0.5.1"
+          ARCHIVE="nph-linux_x64.tar.gz"
+          curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
+          tar -xzf ${ARCHIVE}
+
+      - name: Check style
+        run: |
+          shopt -s extglob # Enable extended globbing
+          ./nph --check examples libp2p tests tools *.@(nim|nims|nimble)
.pinned
@@ -1,6 +1,6 @@
-bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
+bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
 chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#672db137b7cad9b384b8f4fb551fb6bbeaabfe1b
+chronos;https://github.com/status-im/nim-chronos@#dc3847e4d6733dfc3811454c2a9c384b87343e26
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
 faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
 httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
@@ -1,15 +1,15 @@
 mode = ScriptMode.Verbose

 packageName = "libp2p"
-version = "1.3.0"
+version = "1.5.0"
 author = "Status Research & Development GmbH"
 description = "LibP2P implementation"
 license = "MIT"
 skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

 requires "nim >= 1.6.0",
-  "nimcrypto >= 0.6.0 & < 0.7.0" , "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.1.4",
-  "chronicles >= 0.10.2", "chronos >= 4.0.0", "metrics", "secp256k1", "stew#head",
+  "nimcrypto >= 0.6.0 & < 0.7.0" , "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
+  "chronicles >= 0.10.2", "chronos >= 4.0.2", "metrics", "secp256k1", "stew#head",
   "websock", "unittest2", "https://github.com/status-im/nim-quic.git"

 let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
|
|||
buildSample("tutorial_5_discovery", true)
|
||||
exec "nimble install -y nimpng@#HEAD"
|
||||
# this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
|
||||
exec "nimble install -y nico"
|
||||
exec "nimble install -y nico@#af99dd60bf2b395038ece815ea1012330a80d6e6"
|
||||
buildSample("tutorial_6_game", false, "--styleCheck:off")
|
||||
|
||||
# pin system
|
||||
|
|
|
@@ -136,7 +136,8 @@ proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    warn "Exception in triggerConnEvents", msg = exc.msg, peer = peerId, event = $event
+    warn "Exception in triggerConnEvents",
+      description = exc.msg, peer = peerId, event = $event

 proc addPeerEventHandler*(
     c: ConnManager, handler: PeerEventHandler, kind: PeerEventKind
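The bulk of the Nim-side changes in this commit is a mechanical rename of the free-form chronicles log field — previously `msg`, `exc`, `err`, or `errMsg` depending on the call site — to a single `description` key, so exception text always lands under one name. A minimal sketch of the convention (chronicles API as used throughout this diff; the proc itself is hypothetical):

```nim
import chronicles

proc onFailure(exc: ref CatchableError) =
  # Old style used ad-hoc keys such as `msg = exc.msg` or `err = exc.msg`;
  # the unified convention is always `description` for the free-form text.
  warn "Exception in triggerConnEvents", description = exc.msg
```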
@@ -169,7 +170,7 @@ proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.asyn
   except CancelledError as exc:
     raise exc
   except CatchableError as exc: # handlers should not raise!
-    warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
+    warn "Exception in triggerPeerEvents", description = exc.msg, peer = peerId

 proc expectConnection*(
     c: ConnManager, p: PeerId, dir: Direction
@@ -212,7 +213,7 @@ proc closeMuxer(muxer: Muxer) {.async.} =
   try:
     await muxer.handler # TODO noraises?
   except CatchableError as exc:
-    trace "Exception in close muxer handler", exc = exc.msg
+    trace "Exception in close muxer handler", description = exc.msg
   trace "Cleaned up muxer", m = muxer

 proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
@@ -235,7 +236,7 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
   except CatchableError as exc:
     # This is a top-level procedure which will run as a separate task, so it
     # does not need to propagate CancelledError and should handle other errors
-    warn "Unexpected exception peer cleanup handler", mux, msg = exc.msg
+    warn "Unexpected exception peer cleanup handler", mux, description = exc.msg

 proc onClose(c: ConnManager, mux: Muxer) {.async.} =
   ## connection close event handler
@@ -246,7 +247,8 @@ proc onClose(c: ConnManager, mux: Muxer) {.async.} =
     await mux.connection.join()
     trace "Connection closed, cleaning up", mux
   except CatchableError as exc:
-    debug "Unexpected exception in connection manager's cleanup", errMsg = exc.msg, mux
+    debug "Unexpected exception in connection manager's cleanup",
+      description = exc.msg, mux
   finally:
     await c.muxCleanup(mux)
@@ -358,7 +360,7 @@ proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
     try:
       await conn.join()
     except CatchableError as exc:
-      trace "Exception in semaphore monitor, ignoring", exc = exc.msg
+      trace "Exception in semaphore monitor, ignoring", description = exc.msg

     cs.release()
@@ -75,7 +75,9 @@ proc public*(private: Curve25519Key): Curve25519Key =
 proc random*(_: type[Curve25519Key], rng: var HmacDrbgContext): Curve25519Key =
   var res: Curve25519Key
   let defaultBrEc = ecGetDefault()
-  let len = ecKeygen(addr rng.vtable, defaultBrEc, nil, addr res[0], EC_curve25519)
+  let len = ecKeygen(
+    PrngClassPointerConst(addr rng.vtable), defaultBrEc, nil, addr res[0], EC_curve25519
+  )
   # Per bearssl documentation, the keygen only fails if the curve is
   # unrecognised -
   doAssert len == Curve25519KeySize, "Could not generate curve"
@@ -234,7 +234,11 @@ proc random*(
   var ecimp = ecGetDefault()
   var res = new EcPrivateKey
   if ecKeygen(
-    addr rng.vtable, ecimp, addr res.key, addr res.buffer[0], safeConvert[cint](kind)
+    PrngClassPointerConst(addr rng.vtable),
+    ecimp,
+    addr res.key,
+    addr res.buffer[0],
+    safeConvert[cint](kind),
   ) == 0:
     err(EcKeyGenError)
   else:
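Both keygen call sites change together with the bearssl pin (0.1.4 to 0.2.5 in the libp2p.nimble hunk above): the newer nim-bearssl expects a const-annotated PRNG class pointer for `ecKeygen`, so the bare `addr rng.vtable` must now be wrapped in `PrngClassPointerConst`. Callers are unaffected; a hedged usage sketch, assuming libp2p's `newRng` helper and these module paths:

```nim
import libp2p/crypto/crypto     # newRng (assumed export)
import libp2p/crypto/curve25519

let rng = newRng() # ref HmacDrbgContext, seeded from the system RNG
# random() performs the PrngClassPointerConst(addr rng.vtable) call internally,
# so key generation looks the same as before the bearssl bump.
let key = Curve25519Key.random(rng[])
```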
@@ -62,10 +62,12 @@ proc dialAndUpgrade(
       libp2p_total_dial_attempts.inc()
       await transport.dial(hostname, address, peerId)
     except CancelledError as exc:
-      debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
+      trace "Dialing canceled",
+        description = exc.msg, peerId = peerId.get(default(PeerId))
       raise exc
     except CatchableError as exc:
-      debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
+      debug "Dialing failed",
+        description = exc.msg, peerId = peerId.get(default(PeerId))
       libp2p_failed_dials.inc()
       return nil # Try the next address
@@ -87,7 +89,7 @@ proc dialAndUpgrade(
       # we won't succeed through another - no use in trying again
       await dialed.close()
       debug "Connection upgrade failed",
-        err = exc.msg, peerId = peerId.get(default(PeerId))
+        description = exc.msg, peerId = peerId.get(default(PeerId))
       if dialed.dir == Direction.Out:
         libp2p_failed_upgrades_outgoing.inc()
       else:
@@ -200,7 +202,7 @@ proc internalConnect(
       PeerEvent(kind: PeerEventKind.Identified, initiator: true),
     )
   except CatchableError as exc:
-    trace "Failed to finish outgoing upgrade", err = exc.msg
+    trace "Failed to finish outgoing upgrade", description = exc.msg
     await muxed.close()
     raise exc
@@ -327,7 +329,7 @@ method dial*(
     await cleanup()
     raise exc
   except CatchableError as exc:
-    debug "Error dialing", conn, err = exc.msg
+    debug "Error dialing", conn, description = exc.msg
     await cleanup()
     raise exc
@@ -64,7 +64,7 @@ method advertise*(self: RendezVousInterface) {.async.} =
     try:
       await self.rdv.advertise(toAdv, self.ttl)
     except CatchableError as error:
-      debug "RendezVous advertise error: ", msg = error.msg
+      debug "RendezVous advertise error: ", description = error.msg

     await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
@@ -31,7 +31,7 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
         # We still don't abort but warn
         debug "A future has failed, enable trace logging for details",
           error = exc.name
-        trace "Exception message", msg = exc.msg, stack = getStackTrace()
+        trace "Exception message", description = exc.msg, stack = getStackTrace()
   else:
     quote:
       for res in `futs`:
@@ -40,9 +40,9 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
         let exc = res.readError()
         for i in 0 ..< `nexclude`:
           if exc of `exclude`[i]:
-            trace "A future has failed", error = exc.name, msg = exc.msg
+            trace "A future has failed", error = exc.name, description = exc.msg
             break check
         # We still don't abort but warn
         debug "A future has failed, enable trace logging for details",
           error = exc.name
-        trace "Exception details", msg = exc.msg
+        trace "Exception details", description = exc.msg
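For context, `checkFutures` iterates over already-finished futures and logs any failures without aborting, skipping exception types listed in `exclude`. A small usage sketch (chronos `allFinished` plus this module's macro; the worker proc and the `libp2p/errors` module path are assumptions for illustration):

```nim
import chronos
import libp2p/errors # checkFutures (assumed module path)

proc worker(i: int) {.async.} =
  if i mod 2 == 0:
    raise newException(ValueError, "even input rejected")

proc main() {.async.} =
  var futs: seq[Future[void]]
  for i in 0 .. 3:
    futs.add worker(i)
  # Log failures once everything has settled; ValueError is merely traced.
  checkFutures(await allFinished(futs), [ValueError])

waitFor main()
```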
@@ -212,7 +212,7 @@ proc handle*(
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    trace "Exception in multistream", conn, msg = exc.msg
+    trace "Exception in multistream", conn, description = exc.msg
   finally:
     await conn.close()
@@ -116,7 +116,7 @@ proc reset*(s: LPChannel) {.async: (raises: []).} =
       trace "sending reset message", s, conn = s.conn
       await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
     except LPStreamError as exc:
-      trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
+      trace "Can't send reset message", s, conn = s.conn, description = exc.msg
       await s.conn.close()

   asyncSpawn resetMessage()
@@ -145,7 +145,7 @@ method close*(s: LPChannel) {.async: (raises: []).} =
       # It's harmless that close message cannot be sent - the connection is
       # likely down already
       await s.conn.close()
-      trace "Cannot send close message", s, id = s.id, msg = exc.msg
+      trace "Cannot send close message", s, id = s.id, description = exc.msg

   await s.closeUnderlying() # maybe already eofed
@@ -256,7 +256,7 @@ proc completeWrite(
   except LPStreamEOFError as exc:
     raise exc
   except LPStreamError as exc:
-    trace "exception in lpchannel write handler", s, msg = exc.msg
+    trace "exception in lpchannel write handler", s, description = exc.msg
     await s.reset()
     await s.conn.close()
     raise newLPStreamConnDownError(exc)
@@ -70,7 +70,7 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
         labelValues = [$chann.initiator, $m.connection.peerId],
       )
   except CancelledError as exc:
-    warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
+    warn "Error cleaning up mplex channel", m, chann, description = exc.msg

 proc newStreamInternal*(
     m: Mplex,
@@ -175,7 +175,7 @@ method handle*(m: Mplex) {.async: (raises: []).} =
           except LPStreamClosedError as exc:
             # Channel is being closed, but `cleanupChann` was not yet triggered.
             trace "pushing data to channel failed",
-              m, channel, len = data.len, msg = exc.msg
+              m, channel, len = data.len, description = exc.msg
             discard # Ignore message, same as if `cleanupChann` had completed.
         of MessageType.CloseIn, MessageType.CloseOut:
           await channel.pushEof()
@@ -185,11 +185,11 @@ method handle*(m: Mplex) {.async: (raises: []).} =
   except CancelledError:
     debug "Unexpected cancellation in mplex handler", m
   except LPStreamEOFError as exc:
-    trace "Stream EOF", m, msg = exc.msg
+    trace "Stream EOF", m, description = exc.msg
   except LPStreamError as exc:
-    debug "Unexpected stream exception in mplex read loop", m, msg = exc.msg
+    debug "Unexpected stream exception in mplex read loop", m, description = exc.msg
   except MuxerError as exc:
-    debug "Unexpected muxer exception in mplex read loop", m, msg = exc.msg
+    debug "Unexpected muxer exception in mplex read loop", m, description = exc.msg
   finally:
     await m.close()
     trace "Stopped mplex handler", m
@@ -279,10 +279,15 @@ method readOnce*(
     raise newLPStreamRemoteClosedError()
   if channel.recvQueue.len == 0:
     channel.receivedData.clear()
-    try: # https://github.com/status-im/nim-chronos/issues/516
-      discard await race(channel.closedRemotely.wait(), channel.receivedData.wait())
-    except ValueError:
-      raiseAssert("Futures list is not empty")
+    let
+      closedRemotelyFut = channel.closedRemotely.wait()
+      receivedDataFut = channel.receivedData.wait()
+    defer:
+      if not closedRemotelyFut.finished():
+        await closedRemotelyFut.cancelAndWait()
+      if not receivedDataFut.finished():
+        await receivedDataFut.cancelAndWait()
+    await closedRemotelyFut or receivedDataFut
     if channel.closedRemotely.isSet() and channel.recvQueue.len == 0:
       channel.isEof = true
       return
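The readOnce hunk above drops chronos `race()` (worked around since nim-chronos issue #516) in favour of `or`-ing the two wait futures and cancelling whichever one is still pending on exit, so no orphaned `wait()` future outlives the call. The same pattern in isolation, as a sketch over chronos `AsyncEvent`:

```nim
import chronos

proc waitEither(a, b: AsyncEvent) {.async.} =
  let
    futA = a.wait()
    futB = b.wait()
  defer:
    # Cancel the wait that did not complete so it cannot fire later
    # against a reused event.
    if not futA.finished():
      await futA.cancelAndWait()
    if not futB.finished():
      await futB.cancelAndWait()
  await futA or futB # returns as soon as either event fires

when isMainModule:
  let
    a = newAsyncEvent()
    b = newAsyncEvent()
  b.fire()
  waitFor waitEither(a, b)
```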
@@ -508,9 +513,9 @@ method close*(m: Yamux) {.async: (raises: []).} =
     try:
       await m.connection.write(YamuxHeader.goAway(NormalTermination))
     except CancelledError as exc:
-      trace "cancelled sending goAway", msg = exc.msg
+      trace "cancelled sending goAway", description = exc.msg
     except LPStreamError as exc:
-      trace "failed to send goAway", msg = exc.msg
+      trace "failed to send goAway", description = exc.msg
     await m.connection.close()
   trace "Closed yamux"
@@ -596,7 +601,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
         if header.length > 0:
           var buffer = newSeqUninitialized[byte](header.length)
           await m.connection.readExactly(addr buffer[0], int(header.length))
-          trace "Msg Rcv", msg = shortLog(buffer)
+          trace "Msg Rcv", description = shortLog(buffer)
           await channel.gotDataFromRemote(buffer)

         if MsgFlags.Fin in header.flags:
@@ -606,19 +611,19 @@ method handle*(m: Yamux) {.async: (raises: []).} =
           trace "remote reset channel"
           await channel.reset()
   except CancelledError as exc:
-    debug "Unexpected cancellation in yamux handler", msg = exc.msg
+    debug "Unexpected cancellation in yamux handler", description = exc.msg
   except LPStreamEOFError as exc:
-    trace "Stream EOF", msg = exc.msg
+    trace "Stream EOF", description = exc.msg
   except LPStreamError as exc:
-    debug "Unexpected stream exception in yamux read loop", msg = exc.msg
+    debug "Unexpected stream exception in yamux read loop", description = exc.msg
   except YamuxError as exc:
-    trace "Closing yamux connection", error = exc.msg
+    trace "Closing yamux connection", description = exc.msg
     try:
       await m.connection.write(YamuxHeader.goAway(ProtocolError))
     except CancelledError, LPStreamError:
       discard
   except MuxerError as exc:
-    debug "Unexpected muxer exception in yamux read loop", msg = exc.msg
+    debug "Unexpected muxer exception in yamux read loop", description = exc.msg
     try:
       await m.connection.write(YamuxHeader.goAway(ProtocolError))
     except CancelledError, LPStreamError:
@@ -41,7 +41,7 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
     discard requestStream.readData(addr buf[0], dataLen)
     return buf
   except CatchableError as exc:
-    info "Failed to create DNS buffer", msg = exc.msg
+    info "Failed to create DNS buffer", description = exc.msg
     return newSeq[byte](0)

 proc getDnsResponse(
@@ -84,13 +84,13 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
   except CancelledError as exc:
     raise exc
   except AllFuturesFailedError as exc:
-    debug "All dial attempts failed", addrs, exc = exc.msg
+    debug "All dial attempts failed", addrs, description = exc.msg
     await conn.sendResponseError(DialError, "All dial attempts failed")
   except AsyncTimeoutError as exc:
-    debug "Dial timeout", addrs, exc = exc.msg
+    debug "Dial timeout", addrs, description = exc.msg
     await conn.sendResponseError(DialError, "Dial timeout")
   except CatchableError as exc:
-    debug "Unexpected error", addrs, exc = exc.msg
+    debug "Unexpected error", addrs, description = exc.msg
     await conn.sendResponseError(DialError, "Unexpected error")
   finally:
     autonat.sem.release()
@@ -165,7 +165,7 @@ proc new*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      debug "exception in autonat handler", exc = exc.msg, conn
+      debug "exception in autonat handler", description = exc.msg, conn
     finally:
       trace "exiting autonat handler", conn
       await conn.close()
@@ -146,13 +146,13 @@ proc askPeer(
       debug "dialMe answer is reachable"
       Reachable
     except AutonatUnreachableError as error:
-      debug "dialMe answer is not reachable", msg = error.msg
+      debug "dialMe answer is not reachable", description = error.msg
       NotReachable
     except AsyncTimeoutError as error:
-      debug "dialMe timed out", msg = error.msg
+      debug "dialMe timed out", description = error.msg
       Unknown
     except CatchableError as error:
-      debug "dialMe unexpected error", msg = error.msg
+      debug "dialMe unexpected error", description = error.msg
       Unknown
   let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
   if hasReachabilityOrConfidenceChanged:
@@ -194,7 +194,7 @@ proc addressMapper(
       processedMA = peerStore.guessDialableAddr(listenAddr)
         # handle manual port forwarding
     except CatchableError as exc:
-      debug "Error while handling address mapper", msg = exc.msg
+      debug "Error while handling address mapper", description = exc.msg
     addrs.add(processedMA)
   return addrs
@@ -88,7 +88,7 @@ proc startSync*(
     raise err
   except AllFuturesFailedError as err:
     debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed",
-      peerDialableAddrs, msg = err.msg
+      peerDialableAddrs, description = err.msg
     raise newException(
       DcutrError,
       "Dcutr initiator could not connect to the remote peer, all connect attempts failed",
@@ -96,7 +96,7 @@ proc startSync*(
     )
   except AsyncTimeoutError as err:
     debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out",
-      peerDialableAddrs, msg = err.msg
+      peerDialableAddrs, description = err.msg
     raise newException(
       DcutrError,
       "Dcutr initiator could not connect to the remote peer, all connect attempts timed out",
@@ -104,7 +104,7 @@ proc startSync*(
     )
   except CatchableError as err:
     debug "Unexpected error when Dcutr initiator tried to connect to the remote peer",
-      err = err.msg
+      description = err.msg
     raise newException(
       DcutrError,
       "Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
@@ -80,13 +80,13 @@ proc new*(
       raise err
     except AllFuturesFailedError as err:
       debug "Dcutr receiver could not connect to the remote peer, " &
-        "all connect attempts failed", peerDialableAddrs, msg = err.msg
+        "all connect attempts failed", peerDialableAddrs, description = err.msg
     except AsyncTimeoutError as err:
       debug "Dcutr receiver could not connect to the remote peer, " &
-        "all connect attempts timed out", peerDialableAddrs, msg = err.msg
+        "all connect attempts timed out", peerDialableAddrs, description = err.msg
     except CatchableError as err:
       warn "Unexpected error when Dcutr receiver tried to connect " &
-        "to the remote peer", msg = err.msg
+        "to the remote peer", description = err.msg

   let self = T()
   self.handler = handleStream
@@ -93,7 +93,7 @@ proc reserve*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error writing or reading reservation message", exc = exc.msg
+      trace "error writing or reading reservation message", description = exc.msg
       raise newException(ReservationError, exc.msg)

   if msg.msgType != HopMessageType.Status:
@@ -139,7 +139,7 @@ proc dialPeerV1*(
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    trace "error writing hop request", exc = exc.msg
+    trace "error writing hop request", description = exc.msg
     raise exc

   let msgRcvFromRelayOpt =
@@ -148,7 +148,7 @@ proc dialPeerV1*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error reading stop response", exc = exc.msg
+      trace "error reading stop response", description = exc.msg
       await sendStatus(conn, StatusV1.HopCantOpenDstStream)
       raise exc
@@ -190,13 +190,13 @@ proc dialPeerV2*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error reading stop response", exc = exc.msg
+      trace "error reading stop response", description = exc.msg
       raise newException(RelayV2DialError, exc.msg)

   if msgRcvFromRelay.msgType != HopMessageType.Status:
     raise newException(RelayV2DialError, "Unexpected stop response")
   if msgRcvFromRelay.status.get(UnexpectedMessage) != Ok:
-    trace "Relay stop failed", msg = msgRcvFromRelay.status
+    trace "Relay stop failed", description = msgRcvFromRelay.status
     raise newException(RelayV2DialError, "Relay stop failure")
   conn.limitDuration = msgRcvFromRelay.limit.duration
   conn.limitData = msgRcvFromRelay.limit.data
@@ -302,7 +302,7 @@ proc new*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "exception in client handler", exc = exc.msg, conn
+      trace "exception in client handler", description = exc.msg, conn
     finally:
       trace "exiting client handler", conn
       await conn.close()
@@ -167,7 +167,7 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error opening relay stream", dst, exc = exc.msg
+      trace "error opening relay stream", dst, description = exc.msg
       await sendHopStatus(connSrc, ConnectionFailed)
       return
   defer:
@@ -196,7 +196,7 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error sending stop message", msg = exc.msg
+      trace "error sending stop message", description = exc.msg
       await sendHopStatus(connSrc, ConnectionFailed)
       return
@@ -213,7 +213,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
   let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
     await sendHopStatus(conn, MalformedMessage)
     return
-  trace "relayv2 handle stream", msg = msg
+  trace "relayv2 handle stream", hopMsg = msg
   case msg.msgType
   of HopMessageType.Reserve:
     await r.handleReserve(conn)
@@ -272,7 +272,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error opening relay stream", dst, exc = exc.msg
+      trace "error opening relay stream", dst, description = exc.msg
       await sendStatus(connSrc, StatusV1.HopCantDialDst)
       return
   defer:
@@ -289,12 +289,13 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "error writing stop handshake or reading stop response", exc = exc.msg
+      trace "error writing stop handshake or reading stop response",
+        description = exc.msg
       await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
       return

   let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
-    trace "error reading stop response", msg = msgRcvFromDstOpt
+    trace "error reading stop response", response = msgRcvFromDstOpt
     await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
     return
@@ -369,7 +370,7 @@ proc new*(
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      debug "exception in relayv2 handler", exc = exc.msg, conn
+      debug "exception in relayv2 handler", description = exc.msg, conn
     finally:
       trace "exiting relayv2 handler", conn
       await conn.close()
@@ -87,7 +87,7 @@ proc bridge*(
       trace "relay src closed connection", src = connSrc.peerId
     if connDst.closed() or connDst.atEof():
       trace "relay dst closed connection", dst = connDst.peerId
-    trace "relay error", exc = exc.msg
+    trace "relay error", description = exc.msg
   trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
   await futSrc.cancelAndWait()
   await futDst.cancelAndWait()
@@ -156,7 +156,7 @@ method init*(p: Identify) =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "exception in identify handler", exc = exc.msg, conn
+      trace "exception in identify handler", description = exc.msg, conn
     finally:
       trace "exiting identify handler", conn
       await conn.closeWithEOF()
@@ -226,7 +226,7 @@ proc init*(p: IdentifyPush) =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      info "exception in identify push handler", exc = exc.msg, conn
+      info "exception in identify push handler", description = exc.msg, conn
     finally:
       trace "exiting identify push handler", conn
      await conn.closeWithEOF()
@@ -49,7 +49,7 @@ proc new*(T: typedesc[Perf]): T {.public.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "exception in perf handler", exc = exc.msg, conn
+      trace "exception in perf handler", description = exc.msg, conn
     await conn.close()

   p.handler = handle
@@ -63,7 +63,7 @@ method init*(p: Ping) =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "exception in ping handler", exc = exc.msg, conn
+      trace "exception in ping handler", description = exc.msg, conn

   p.handler = handle
   p.codec = PingCodec
@@ -106,7 +106,7 @@ method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
     debug "failed to decode msg from peer", peer, err = error
     raise newException(CatchableError, "Peer msg couldn't be decoded")

-  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  trace "decoded msg from peer", peer, payload = rpcMsg.shortLog
   # trigger hooks
   peer.recvObservers(rpcMsg)
@@ -187,7 +187,7 @@ method init*(f: FloodSub) =
       # do not need to propagate CancelledError.
       trace "Unexpected cancellation in floodsub handler", conn
     except CatchableError as exc:
-      trace "FloodSub handler leaks an error", exc = exc.msg, conn
+      trace "FloodSub handler leaks an error", description = exc.msg, conn

   f.handler = handler
   f.codec = FloodSubCodec
@@ -219,7 +219,7 @@ method publish*(f: FloodSub, topic: string, data: seq[byte]): Future[int] {.asyn
     trace "Error generating message id, skipping publish", error = error
     return 0

-  trace "Created new message", msg = shortLog(msg), peers = peers.len, topic, msgId
+  trace "Created new message", payload = shortLog(msg), peers = peers.len, topic, msgId

   if f.addSeen(f.salt(msgId)):
     # custom msgid providers might cause this
@@ -220,7 +220,7 @@ method init*(g: GossipSub) =
       # do not need to propagate CancelledError.
       trace "Unexpected cancellation in gossipsub handler", conn
     except CatchableError as exc:
-      trace "GossipSub handler leaks an error", exc = exc.msg, conn
+      trace "GossipSub handler leaks an error", description = exc.msg, conn

   g.handler = handler
   g.codecs &= GossipSubCodec_12
@@ -368,7 +368,7 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
     else:
       libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])

-    trace "sending control message", msg = shortLog(respControl), peer
+    trace "sending control message", payload = shortLog(respControl), peer
     g.send(peer, RPCMsg(control: some(respControl)), isHighPriority = true)

     if messages.len > 0:
@@ -453,6 +453,9 @@ proc validateAndRelay(

   g.rewardDelivered(peer, topic, true)

+  # trigger hooks
+  peer.validatedObservers(msg, msgId)
+
   # The send list typically matches the idontwant list from above, but
   # might differ if validation takes time
   var toSendPeers = HashSet[PubSubPeer]()
@@ -488,7 +491,7 @@ proc validateAndRelay(

     await handleData(g, topic, msg.data)
   except CatchableError as exc:
-    info "validateAndRelay failed", msg = exc.msg
+    info "validateAndRelay failed", description = exc.msg

 proc dataAndTopicsIdSize(msgs: seq[Message]): int =
   msgs.mapIt(it.data.len + it.topic.len).foldl(a + b, 0)
@@ -537,7 +540,7 @@ method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
   for m in rpcMsg.messages:
     libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, m.topic])

-  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  trace "decoded msg from peer", peer, payload = rpcMsg.shortLog
   await rateLimit(g, peer, g.messageOverhead(rpcMsg, msgSize))

   # trigger hooks - these may modify the message
@@ -768,7 +771,7 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
   logScope:
     msgId = shortLog(msgId)

-  trace "Created new message", msg = shortLog(msg), peers = peers.len
+  trace "Created new message", payload = shortLog(msg), peers = peers.len

   if g.addSeen(g.salt(msgId)):
     # If the message was received or published recently, don't re-publish it -
@@ -803,7 +806,7 @@ proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.as
       trace "Direct peer dial canceled"
       raise exc
     except CatchableError as exc:
-      debug "Direct peer error dialing", msg = exc.msg
+      debug "Direct peer error dialing", description = exc.msg

 proc addDirectPeer*(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
   g.parameters.directPeers[id] = addrs
@@ -135,7 +135,7 @@ proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
   try:
     await g.switch.disconnect(peer.peerId)
   except CatchableError as exc: # Never cancelled
-    trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
+    trace "Failed to close connection", peer, errName = exc.name, description = exc.msg

 proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
   if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
@@ -197,7 +197,7 @@ proc send*(
   ## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
   ## priority messages have been sent.

-  trace "sending pubsub message to peer", peer, msg = shortLog(msg)
+  trace "sending pubsub message to peer", peer, payload = shortLog(msg)
   peer.send(msg, p.anonymize, isHighPriority)

 proc broadcast*(
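The doc comment in this hunk is the contract worth keeping in mind: high-priority messages (own publishes, control traffic) are written immediately, while low-priority ones (forwarded gossip) queue behind them. A toy sketch of that two-lane idea — the `Lane` and `Msg` types here are invented for illustration and are not the library's API:

```nim
type
  Lane = enum
    laneHigh, laneLow
  Msg = object
    lane: Lane
    data: string

proc dispatch(queue: var seq[Msg], m: Msg) =
  if m.lane == laneHigh:
    echo "sending immediately: ", m.data # own publish / control message
  else:
    queue.add m # forwarded gossip waits behind high-priority sends

var q: seq[Msg]
dispatch(q, Msg(lane: laneHigh, data: "GRAFT"))
dispatch(q, Msg(lane: laneLow, data: "relayed message"))
```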
@@ -255,7 +255,7 @@ proc broadcast*(
     else:
       libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"])

-  trace "broadcasting messages to peers", peers = sendPeers.len, msg = shortLog(msg)
+  trace "broadcasting messages to peers", peers = sendPeers.len, payload = shortLog(msg)

   if anyIt(sendPeers, it.hasObservers):
     for peer in sendPeers:
@@ -403,7 +403,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
     for fut in futs:
       if fut.failed:
         let err = fut.readError()
-        warn "Error in topic handler", msg = err.msg
+        warn "Error in topic handler", description = err.msg

   return waiter()
@@ -437,7 +437,7 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    trace "exception occurred in pubsub handle", exc = exc.msg, conn
+    trace "exception occurred in pubsub handle", description = exc.msg, conn
   finally:
     await conn.closeWithEOF()
@@ -67,6 +67,8 @@ type
   PubSubObserver* = ref object
     onRecv*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
     onSend*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
+    onValidated*:
+      proc(peer: PubSubPeer, msg: Message, msgId: MessageId) {.gcsafe, raises: [].}

   PubSubPeerEventKind* {.pure.} = enum
     StreamOpened
@@ -170,14 +172,23 @@ proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
   if not (isNil(p.observers)) and p.observers[].len > 0:
     for obs in p.observers[]:
       if not (isNil(obs)): # TODO: should never be nil, but...
-        obs.onRecv(p, msg)
+        if not (isNil(obs.onRecv)):
+          obs.onRecv(p, msg)
+
+proc validatedObservers*(p: PubSubPeer, msg: Message, msgId: MessageId) =
+  # trigger hooks
+  if not (isNil(p.observers)) and p.observers[].len > 0:
+    for obs in p.observers[]:
+      if not (isNil(obs.onValidated)):
+        obs.onValidated(p, msg, msgId)

 proc sendObservers(p: PubSubPeer, msg: var RPCMsg) =
   # trigger hooks
   if not (isNil(p.observers)) and p.observers[].len > 0:
     for obs in p.observers[]:
       if not (isNil(obs)): # TODO: should never be nil, but...
-        obs.onSend(p, msg)
+        if not (isNil(obs.onSend)):
+          obs.onSend(p, msg)

 proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
   debug "starting pubsub read loop", conn, peer = p, closed = conn.closed
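With the new field, observers can implement any subset of the hooks: the per-callback nil checks above make a partially populated `PubSubObserver` safe, and `onValidated` fires only after a message passes validation (see the `validateAndRelay` hunk earlier). A registration sketch — the module paths and the `addObserver` entry point are assumptions based on the existing API:

```nim
import libp2p/protocols/pubsub/pubsubpeer
import libp2p/protocols/pubsub/rpc/messages

let obs = PubSubObserver(
  # onRecv and onSend stay nil on purpose; the new nil checks allow this.
  onValidated: proc(peer: PubSubPeer, msg: Message, msgId: MessageId) {.gcsafe, raises: [].} =
    discard # e.g. bump a per-peer counter of validated messages
)
# pubsub.addObserver(obs) # assumed registration call on a PubSub instance
```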
@@ -194,10 +205,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
         data = newSeq[byte]() # Release memory
     except PeerRateLimitError as exc:
       debug "Peer rate limit exceeded, exiting read while",
-        conn, peer = p, error = exc.msg
+        conn, peer = p, description = exc.msg
     except CatchableError as exc:
       debug "Exception occurred in PubSubPeer.handle",
-        conn, peer = p, closed = conn.closed, exc = exc.msg
+        conn, peer = p, closed = conn.closed, description = exc.msg
     finally:
       await conn.close()
   except CancelledError:
@@ -206,7 +217,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
     trace "Unexpected cancellation in PubSubPeer.handle"
   except CatchableError as exc:
     trace "Exception occurred in PubSubPeer.handle",
-      conn, peer = p, closed = conn.closed, exc = exc.msg
+      conn, peer = p, closed = conn.closed, description = exc.msg
   finally:
     debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
@@ -225,7 +236,7 @@ proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    debug "Errors during disconnection events", error = exc.msg
+    debug "Errors during disconnection events", description = exc.msg
   # don't cleanup p.address else we leak some gossip stat table

 proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
@@ -272,7 +283,7 @@ proc connectImpl(p: PubSubPeer) {.async.} =
       return
     await connectOnce(p)
   except CatchableError as exc: # never cancelled
-    debug "Could not establish send connection", msg = exc.msg
+    debug "Could not establish send connection", description = exc.msg

 proc connect*(p: PubSubPeer) =
   if p.connected:
@@ -314,7 +325,7 @@ proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
   except CatchableError as exc: # never cancelled
     # Because we detach the send call from the currently executing task using
     # asyncSpawn, no exceptions may leak out of it
-    trace "Unable to send to remote", conn, msg = exc.msg
+    trace "Unable to send to remote", conn, description = exc.msg
    # Next time sendConn is used, it will have its close flag set and thus
    # will be recycled
@@ -330,7 +341,7 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =

   var conn = p.sendConn
   if conn == nil or conn.closed():
-    debug "No send connection", p, msg = shortLog(msg)
+    debug "No send connection", p, payload = shortLog(msg)
     return

   trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
@@ -372,7 +383,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
   ) == 0

   if msg.len <= 0:
-    debug "empty message, skipping", p, msg = shortLog(msg)
+    debug "empty message, skipping", p, payload = shortLog(msg)
     Future[void].completed()
   elif msg.len > p.maxMessageSize:
     info "trying to send a msg too big for pubsub",
@@ -310,7 +310,7 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
   ok(msgs)

 proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
-  trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
+  trace "encodeRpcMsg: encoding message", payload = msg.shortLog()
   var pb = initProtoBuffer(maxSize = uint.high)
   for item in msg.subscriptions:
     pb.write(1, item)

@@ -329,7 +329,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
   pb.buffer

 proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
-  trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
+  trace "decodeRpcMsg: decoding message", payload = msg.shortLog()
   var pb = initProtoBuffer(msg, maxSize = uint.high)
   var rpcMsg = RPCMsg()
   assign(rpcMsg.messages, ?pb.decodeMessages())
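For orientation, a hedged round-trip sketch of the two procs touched above. The import path and the `SubOpts` field names are assumptions based on nim-libp2p's module layout, not something this diff shows:

```nim
import stew/results
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

# encode an RPCMsg to wire bytes, decode it back, and check it survived
let original = RPCMsg(subscriptions: @[SubOpts(subscribe: true, topic: "foo")])
let wire = encodeRpcMsg(original, anonymize = false)
let decoded = decodeRpcMsg(wire).tryGet()
assert decoded.subscriptions.len == 1
assert decoded.subscriptions[0].topic == "foo"
```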
@@ -29,6 +29,7 @@ type
     head, tail: TimedEntry[K] # nim linked list doesn't allow inserting at pos
     entries: HashSet[TimedEntry[K]]
     timeout: Duration
+    maxSize: int # Optional max size of the cache, 0 means unlimited

 func `==`*[E](a, b: TimedEntry[E]): bool =
   if isNil(a) == isNil(b):

@@ -78,7 +79,18 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
   # Puts k in cache, returning true if the item was already present and false
   # otherwise. If the item was already present, its expiry timer will be
   # refreshed.
+  func ensureSizeBound(t: var TimedCache[K]) =
+    if t.maxSize > 0 and t.entries.len() >= t.maxSize and k notin t:
+      if t.head != nil:
+        t.entries.excl(t.head)
+        t.head = t.head.next
+        if t.head != nil:
+          t.head.prev = nil
+        else:
+          t.tail = nil
+
   t.expire(now)
+  t.ensureSizeBound()

   let
     previous = t.del(k) # Refresh existing item

@@ -128,5 +140,5 @@ func addedAt*[K](t: var TimedCache[K], k: K): Moment =

   default(Moment)

-func init*[K](T: type TimedCache[K], timeout: Duration = Timeout): T =
-  T(timeout: timeout)
+func init*[K](T: type TimedCache[K], timeout: Duration = Timeout, maxSize: int = 0): T =
+  T(timeout: timeout, maxSize: maxSize)
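A short usage sketch of the new bound (the module path is assumed from nim-libp2p's layout). Eviction is FIFO from `head`, i.e. the entry that has been in the cache longest goes first, and `put` keeps returning whether the key was already present:

```nim
import chronos/timer # Moment, seconds
import libp2p/utils/timedcache

var cache = TimedCache[int].init(timeout = 5.seconds, maxSize = 2)
let now = Moment.now()
assert not cache.put(1, now) # false: 1 was not present before
assert not cache.put(2, now)
assert not cache.put(3, now) # cache full: 1, the oldest entry, is evicted
assert 1 notin cache
assert 2 in cache and 3 in cache
```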
@@ -499,7 +499,7 @@ proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
     else:
       trace "Successfully registered", peer, response = msgRecv.registerResponse
   except CatchableError as exc:
-    trace "exception in the advertise", error = exc.msg
+    trace "exception in the advertise", description = exc.msg
   finally:
     rdv.sema.release()

@@ -618,7 +618,7 @@ proc request*(
   except CancelledError as exc:
     raise exc
   except CatchableError as exc:
-    trace "exception catch in request", error = exc.msg
+    trace "exception catch in request", description = exc.msg
   return toSeq(s.values()).mapIt(it[0])

 proc unsubscribeLocally*(rdv: RendezVous, ns: string) =

@@ -646,7 +646,7 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
       await conn.close()
     await conn.writeLp(msg.buffer)
   except CatchableError as exc:
-    trace "exception while unsubscribing", error = exc.msg
+    trace "exception while unsubscribing", description = exc.msg

   for peer in rdv.peers:
     discard await rdv.unsubscribePeer(peer).withTimeout(5.seconds)

@@ -692,7 +692,7 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      trace "exception in rendezvous handler", error = exc.msg
+      trace "exception in rendezvous handler", description = exc.msg
     finally:
       await conn.close()
@@ -155,7 +155,7 @@ method init*(s: Secure) =
       await conn.close()
       raise exc
     except LPStreamError as exc:
-      warn "securing connection failed", err = exc.msg, conn
+      warn "securing connection failed", description = exc.msg, conn
       await conn.close()

   s.handler = handle
@@ -97,52 +97,49 @@ proc getWildcardMultiAddresses(

 proc getWildcardAddress(
     maddress: MultiAddress,
-    multiCodec: MultiCodec,
-    anyAddr: openArray[uint8],
     addrFamily: AddressFamily,
     port: Port,
     networkInterfaceProvider: NetworkInterfaceProvider,
 ): seq[MultiAddress] =
-  var addresses: seq[MultiAddress]
-  maddress.getProtocolArgument(multiCodec).withValue(address):
-    if address == anyAddr:
-      let filteredInterfaceAddresses = networkInterfaceProvider(addrFamily)
-      addresses.add(
-        getWildcardMultiAddresses(filteredInterfaceAddresses, IPPROTO_TCP, port)
-      )
-    else:
-      addresses.add(maddress)
-  return addresses
+  let filteredInterfaceAddresses = networkInterfaceProvider(addrFamily)
+  getWildcardMultiAddresses(filteredInterfaceAddresses, IPPROTO_TCP, port)

 proc expandWildcardAddresses(
     networkInterfaceProvider: NetworkInterfaceProvider, listenAddrs: seq[MultiAddress]
 ): seq[MultiAddress] =
   var addresses: seq[MultiAddress]

   # In this loop we expand bound addresses like `0.0.0.0` and `::` to list of interface addresses.
   for listenAddr in listenAddrs:
     if TCP_IP.matchPartial(listenAddr):
       listenAddr.getProtocolArgument(multiCodec("tcp")).withValue(portArg):
         let port = Port(uint16.fromBytesBE(portArg))
-        if IP4.matchPartial(listenAddr):
-          let wildcardAddresses = getWildcardAddress(
-            listenAddr,
-            multiCodec("ip4"),
-            AnyAddress.address_v4,
-            AddressFamily.IPv4,
-            port,
-            networkInterfaceProvider,
-          )
-          addresses.add(wildcardAddresses)
+        listenAddr.getProtocolArgument(multiCodec("ip4")).withValue(ip4):
+          if ip4 == AnyAddress.address_v4:
+            addresses.add(
+              getWildcardAddress(
+                listenAddr, AddressFamily.IPv4, port, networkInterfaceProvider
+              )
+            )
+          else:
+            addresses.add(listenAddr)
-        elif IP6.matchPartial(listenAddr):
-          let wildcardAddresses = getWildcardAddress(
-            listenAddr,
-            multiCodec("ip6"),
-            AnyAddress6.address_v6,
-            AddressFamily.IPv6,
-            port,
-            networkInterfaceProvider,
-          )
-          addresses.add(wildcardAddresses)
-        else:
-          addresses.add(listenAddr)
+        listenAddr.getProtocolArgument(multiCodec("ip6")).withValue(ip6):
+          if ip6 == AnyAddress6.address_v6:
+            addresses.add(
+              getWildcardAddress(
+                listenAddr, AddressFamily.IPv6, port, networkInterfaceProvider
+              )
+            )
+            # IPv6 dual stack
+            addresses.add(
+              getWildcardAddress(
+                listenAddr, AddressFamily.IPv4, port, networkInterfaceProvider
+              )
+            )
+          else:
+            addresses.add(listenAddr)
     else:
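The behavioural change buried in this refactor is the dual-stack branch: a wildcard `::` listener on a dual-stack host accepts IPv4 traffic too, so it is now expanded against the IPv4 interface addresses as well as the IPv6 ones. A self-contained toy sketch of that rule (plain strings instead of MultiAddress, a stubbed interface provider):

```nim
type AddressFamily = enum IPv4, IPv6

proc interfaceAddrs(fam: AddressFamily): seq[string] =
  # stand-in for NetworkInterfaceProvider
  case fam
  of IPv4: @["127.0.0.1", "192.168.1.22"]
  of IPv6: @["::1", "fe80::1"]

proc expand(listen: string): seq[string] =
  case listen
  of "0.0.0.0": interfaceAddrs(IPv4)
  of "::": interfaceAddrs(IPv6) & interfaceAddrs(IPv4) # IPv6 dual stack
  else: @[listen] # concrete address: keep as-is

echo expand("::") # @["::1", "fe80::1", "127.0.0.1", "192.168.1.22"]
```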
@@ -326,4 +326,4 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
   except LPStreamEOFError:
     trace "Expected EOF came", s
   except LPStreamError as exc:
-    debug "Unexpected error while waiting for EOF", s, msg = exc.msg
+    debug "Unexpected error while waiting for EOF", s, description = exc.msg
@@ -219,7 +219,7 @@ proc upgradeMonitor(
       libp2p_failed_upgrades_incoming.inc()
       if not isNil(conn):
         await conn.close()
-      trace "Exception awaiting connection upgrade", exc = exc.msg, conn
+      trace "Exception awaiting connection upgrade", description = exc.msg, conn
   finally:
     upgrades.release()

@@ -264,7 +264,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
       upgrades.release() # always release the slot
       return
     except CatchableError as exc:
-      error "Exception in accept loop, exiting", exc = exc.msg
+      error "Exception in accept loop, exiting", description = exc.msg
       upgrades.release() # always release the slot
       if not isNil(conn):
         await conn.close()

@@ -282,7 +282,7 @@ proc stop*(s: Switch) {.async, public.} =
     # Stop accepting incoming connections
     await allFutures(s.acceptFuts.mapIt(it.cancelAndWait())).wait(1.seconds)
   except CatchableError as exc:
-    debug "Cannot cancel accepts", error = exc.msg
+    debug "Cannot cancel accepts", description = exc.msg

   for service in s.services:
     discard await service.stop(s)

@@ -296,7 +296,7 @@ proc stop*(s: Switch) {.async, public.} =
     except CancelledError as exc:
       raise exc
     except CatchableError as exc:
-      warn "error cleaning up transports", msg = exc.msg
+      warn "error cleaning up transports", description = exc.msg

   await s.ms.stop()
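One pattern in the `stop` hunks deserves a callout: cancellation of the accept loops is bounded. Every pending future is cancelled, but the whole batch gets at most one second before shutdown moves on. A self-contained sketch of the idiom, assuming chronos, with `worker` standing in for a blocked accept future:

```nim
import std/sequtils
import chronos

proc worker() {.async.} =
  await sleepAsync(10.seconds) # stands in for a blocked accept()

proc shutdown() {.async.} =
  let accepts = @[worker(), worker(), worker()]
  try:
    # cancel every pending future, but cap the wait for the whole batch
    await allFutures(accepts.mapIt(it.cancelAndWait())).wait(1.seconds)
  except CatchableError as exc:
    echo "Cannot cancel accepts: ", exc.msg

waitFor shutdown()
```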
@@ -230,47 +230,50 @@ method accept*(self: TcpTransport): Future[Connection] =
     raise newTransportClosedError()

   if self.acceptFuts.len <= 0:
+    # Holds futures representing ongoing accept calls on multiple servers.
     self.acceptFuts = self.servers.mapIt(it.accept())

   let
     finished =
       try:
+        # Waits for any one of these futures to complete, indicating that a new connection has been accepted on one of the servers.
         await one(self.acceptFuts)
       except ValueError:
         raise (ref TcpTransportError)(msg: "No listeners configured")

     index = self.acceptFuts.find(finished)
-    transp =
-      try:
-        await finished
-      except TransportTooManyError as exc:
-        debug "Too many files opened", exc = exc.msg
-        return nil
-      except TransportAbortedError as exc:
-        debug "Connection aborted", exc = exc.msg
-        return nil
-      except TransportUseClosedError as exc:
-        raise newTransportClosedError(exc)
-      except TransportOsError as exc:
-        raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
-      except common.TransportError as exc: # Needed for chronos 4.0.0 support
-        raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
-      except CancelledError as exc:
-        raise exc

+  # A new connection has been accepted. The corresponding server should immediately start accepting another connection.
+  # Thus we replace the completed future with a new one by calling accept on the same server again.
+  self.acceptFuts[index] = self.servers[index].accept()
+  let transp =
+    try:
+      await finished
+    except TransportTooManyError as exc:
+      debug "Too many files opened", description = exc.msg
+      return nil
+    except TransportAbortedError as exc:
+      debug "Connection aborted", description = exc.msg
+      return nil
+    except TransportUseClosedError as exc:
+      raise newTransportClosedError(exc)
+    except TransportOsError as exc:
+      raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
+    except common.TransportError as exc: # Needed for chronos 4.0.0 support
+      raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
+    except CancelledError as exc:
+      raise exc

   if not self.running: # Stopped while waiting
     await transp.closeWait()
     raise newTransportClosedError()

-  self.acceptFuts[index] = self.servers[index].accept()
-
   let remote =
     try:
       transp.remoteAddress
     except TransportOsError as exc:
       # The connection had errors / was closed before `await` returned control
       await transp.closeWait()
-      debug "Cannot read remote address", exc = exc.msg
+      debug "Cannot read remote address", description = exc.msg
       return nil

   let observedAddr =
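The substantive change here is ordering, not logging: the completed slot in `acceptFuts` is re-armed before the finished future is awaited, so an accept that fails or returns nil can no longer leave one server without a pending accept call. A reduced, runnable sketch of the `one()` pattern (chronos assumed; `fakeAccept` stands in for a server's accept):

```nim
import std/sequtils
import chronos

proc fakeAccept(id: int): Future[int] {.async.} =
  await sleepAsync(milliseconds(10 * id))
  return id

proc acceptLoop() {.async.} =
  let ids = @[1, 2, 3]
  var pending = ids.mapIt(fakeAccept(it))
  for _ in 0 ..< 5:
    let finished = await one(pending)       # first accept to complete
    let index = pending.find(finished)
    pending[index] = fakeAccept(ids[index]) # re-arm before touching the result
    echo "accepted from server ", await finished

waitFor acceptLoop()
```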
@@ -206,7 +206,7 @@ method stop*(self: WsTransport) {.async.} =
     self.httpservers = @[]
     trace "Transport stopped"
   except CatchableError as exc:
-    trace "Error shutting down ws transport", exc = exc.msg
+    trace "Error shutting down ws transport", description = exc.msg

 proc connHandler(
     self: WsTransport, stream: WSSession, secure: bool, dir: Direction

@@ -223,7 +223,7 @@ proc connHandler(
       MultiAddress.init(remoteAddr).tryGet() & codec.tryGet()
     except CatchableError as exc:
-      trace "Failed to create observedAddr", exc = exc.msg
+      trace "Failed to create observedAddr", description = exc.msg
       if not (isNil(stream) and stream.stream.reader.closed):
         await stream.close()
       raise exc

@@ -271,26 +271,26 @@ method accept*(self: WsTransport): Future[Connection] {.async.} =
       await req.stream.closeWait()
       raise exc
   except WebSocketError as exc:
-    debug "Websocket Error", exc = exc.msg
+    debug "Websocket Error", description = exc.msg
   except HttpError as exc:
-    debug "Http Error", exc = exc.msg
+    debug "Http Error", description = exc.msg
   except AsyncStreamError as exc:
-    debug "AsyncStream Error", exc = exc.msg
+    debug "AsyncStream Error", description = exc.msg
   except TransportTooManyError as exc:
-    debug "Too many files opened", exc = exc.msg
+    debug "Too many files opened", description = exc.msg
   except TransportAbortedError as exc:
-    debug "Connection aborted", exc = exc.msg
+    debug "Connection aborted", description = exc.msg
   except AsyncTimeoutError as exc:
-    debug "Timed out", exc = exc.msg
+    debug "Timed out", description = exc.msg
   except TransportUseClosedError as exc:
-    debug "Server was closed", exc = exc.msg
+    debug "Server was closed", description = exc.msg
     raise newTransportClosedError(exc)
   except CancelledError as exc:
     raise exc
   except TransportOsError as exc:
-    debug "OS Error", exc = exc.msg
+    debug "OS Error", description = exc.msg
   except CatchableError as exc:
-    info "Unexpected error accepting connection", exc = exc.msg
+    info "Unexpected error accepting connection", description = exc.msg
     raise exc

 method dial*(
@@ -144,7 +144,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
     let transport1 = transpProvider()
     await transport1.start(addrs)

-    proc acceptHandler() {.async.} =
+    proc acceptHandler() {.async, gensym.} =
       while true:
         let conn = await transport1.accept()
         await conn.write(newSeq[byte](0))

@@ -208,7 +208,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
     let transport1 = transpProvider()
     await transport1.start(ma)

-    proc acceptHandler() {.async.} =
+    proc acceptHandler() {.async, gensym.} =
       let conn = await transport1.accept()
       await conn.close()
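The `gensym` addition is about template hygiene: `commonTransportTest` is a template, and routine definitions inside templates are injected into the instantiation scope by default, so repeated expansions would re-declare `acceptHandler`. A minimal sketch of the failure mode and the fix:

```nim
template defineAndRun(body: untyped) =
  # without `gensym`, two expansions in one scope would both declare
  # `handler` and fail to compile with a redefinition error
  proc handler() {.gensym.} =
    body

  handler()

defineAndRun(echo "first expansion")
defineAndRun(echo "second expansion") # compiles thanks to gensym
```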
@@ -18,6 +18,9 @@ import strutils, os
 --d:
 unittestPrintTime
 --skipParentCfg
+--mm:
+refc
+# reconsider when there's a version-2-2 branch worth testing with as we might switch to orc

 # Only add chronicles param if the
 # user didn't specify any
@@ -120,7 +120,7 @@ proc main() {.async.} =
     echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
     quit(0)
   except CatchableError as e:
-    error "Unexpected error", msg = e.msg
+    error "Unexpected error", description = e.msg

 discard waitFor(main().withTimeout(4.minutes))
 quit(1)
@@ -220,6 +220,63 @@ suite "GossipSub":

     await allFuturesThrowing(nodesFut.concat())

+  asyncTest "GossipSub's observers should run after message is sent, received and validated":
+    var
+      recvCounter = 0
+      sendCounter = 0
+      validatedCounter = 0
+
+    proc handler(topic: string, data: seq[byte]) {.async.} =
+      discard
+
+    proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
+      inc recvCounter
+
+    proc onSend(peer: PubSubPeer, msgs: var RPCMsg) =
+      inc sendCounter
+
+    proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) =
+      inc validatedCounter
+
+    let obs0 = PubSubObserver(onSend: onSend)
+    let obs1 = PubSubObserver(onRecv: onRecv, onValidated: onValidated)
+
+    let nodes = generateNodes(2, gossip = true)
+    # start switches
+    discard await allFinished(nodes[0].switch.start(), nodes[1].switch.start())
+
+    await subscribeNodes(nodes)
+
+    nodes[0].addObserver(obs0)
+    nodes[1].addObserver(obs1)
+    nodes[1].subscribe("foo", handler)
+    nodes[1].subscribe("bar", handler)
+
+    proc validator(
+        topic: string, message: Message
+    ): Future[ValidationResult] {.async.} =
+      result = if topic == "foo": ValidationResult.Accept else: ValidationResult.Reject
+
+    nodes[1].addValidator("foo", "bar", validator)
+
+    # Send message that will be accepted by the receiver's validator
+    tryPublish await nodes[0].publish("foo", "Hello!".toBytes()), 1
+
+    check:
+      recvCounter == 1
+      validatedCounter == 1
+      sendCounter == 1
+
+    # Send message that will be rejected by the receiver's validator
+    tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
+
+    check:
+      recvCounter == 2
+      validatedCounter == 1
+      sendCounter == 2
+
+    await allFuturesThrowing(nodes[0].switch.stop(), nodes[1].switch.stop())
+
   asyncTest "GossipSub unsub - resub faster than backoff":
     var handlerFut = newFuture[bool]()
     proc handler(topic: string, data: seq[byte]) {.async.} =
@@ -57,3 +57,93 @@ suite "TimedCache":
     for i in 101 .. 100000:
       check:
         i in cache
+
+  test "max size constraint":
+    var cache = TimedCache[int].init(5.seconds, 3) # maxSize = 3
+
+    let now = Moment.now()
+    check:
+      not cache.put(1, now)
+      not cache.put(2, now + 1.seconds)
+      not cache.put(3, now + 2.seconds)
+
+    check:
+      1 in cache
+      2 in cache
+      3 in cache
+
+    check:
+      not cache.put(4, now + 3.seconds) # exceeds maxSize, evicts 1
+
+    check:
+      1 notin cache
+      2 in cache
+      3 in cache
+      4 in cache
+
+    check:
+      not cache.put(5, now + 4.seconds) # exceeds maxSize, evicts 2
+
+    check:
+      1 notin cache
+      2 notin cache
+      3 in cache
+      4 in cache
+      5 in cache
+
+    check:
+      not cache.put(6, now + 5.seconds) # exceeds maxSize, evicts 3
+
+    check:
+      1 notin cache
+      2 notin cache
+      3 notin cache
+      4 in cache
+      5 in cache
+      6 in cache
+
+  test "max size with expiration":
+    var cache = TimedCache[int].init(3.seconds, 2) # maxSize = 2
+
+    let now = Moment.now()
+    check:
+      not cache.put(1, now)
+      not cache.put(2, now + 1.seconds)
+
+    check:
+      1 in cache
+      2 in cache
+
+    check:
+      not cache.put(3, now + 5.seconds) # expires 1 and 2, should only contain 3
+
+    check:
+      1 notin cache
+      2 notin cache
+      3 in cache
+
+  test "max size constraint with refresh":
+    var cache = TimedCache[int].init(5.seconds, 3) # maxSize = 3
+
+    let now = Moment.now()
+    check:
+      not cache.put(1, now)
+      not cache.put(2, now + 1.seconds)
+      not cache.put(3, now + 2.seconds)
+
+    check:
+      1 in cache
+      2 in cache
+      3 in cache
+
+    check:
+      cache.put(1, now + 3.seconds) # refreshes 1, now 2 is the oldest
+
+    check:
+      not cache.put(4, now + 3.seconds) # exceeds maxSize, evicts 2
+
+    check:
+      1 in cache
+      2 notin cache
+      3 in cache
+      4 in cache
@@ -24,4 +24,5 @@ import
   testbufferstream, testidentify, testobservedaddrmanager, testconnmngr, testswitch,
   testnoise, testpeerinfo, testpeerstore, testping, testmplex, testrelayv1, testrelayv2,
   testrendezvous, testdiscovery, testyamux, testautonat, testautonatservice,
-  testautorelay, testdcutr, testhpservice, testutility, testhelpers
+  testautorelay, testdcutr, testhpservice, testutility, testhelpers,
+  testwildcardresolverservice
@@ -38,18 +38,11 @@ proc getAddressesMock(
     echo "Error: " & $e.msg
     fail()

-proc createSwitch(svc: Service): Switch =
+proc createSwitch(svc: Service, addrs: seq[MultiAddress]): Switch =
   SwitchBuilder
   .new()
   .withRng(newRng())
-  .withAddresses(
-    @[
-      MultiAddress.init("/ip4/127.0.0.1/tcp/0/").tryGet(),
-      MultiAddress.init("/ip4/0.0.0.0/tcp/0/").tryGet(),
-      MultiAddress.init("/ip6/::/tcp/0/").tryGet(),
-    ],
-    false,
-  )
+  .withAddresses(addrs, false)
   .withTcpTransport()
   .withMplex()
   .withNoise()

@@ -63,19 +56,19 @@ suite "WildcardAddressResolverService":
   asyncTest "WildcardAddressResolverService must resolve wildcard addresses and stop doing so when stopped":
     let svc: Service =
       WildcardAddressResolverService.new(networkInterfaceProvider = getAddressesMock)
-    let switch = createSwitch(svc)
+    let switch = createSwitch(
+      svc,
+      @[
+        MultiAddress.init("/ip4/127.0.0.1/tcp/0/").tryGet(),
+        MultiAddress.init("/ip4/0.0.0.0/tcp/0/").tryGet(),
+        MultiAddress.init("/ip6/::/tcp/0/").tryGet(),
+      ],
+    )
     await switch.start()
     let tcpIp4Locahost = switch.peerInfo.addrs[0][multiCodec("tcp")].get
     let tcpIp4Wildcard = switch.peerInfo.addrs[1][multiCodec("tcp")].get
-    let tcpIp6 = switch.peerInfo.addrs[2][multiCodec("tcp")].get # tcp port for ip6
+    let tcpIp6 = switch.peerInfo.addrs[3][multiCodec("tcp")].get # tcp port for ip6

     check switch.peerInfo.addrs ==
       @[
         MultiAddress.init("/ip4/127.0.0.1" & $tcpIp4Locahost).get,
         MultiAddress.init("/ip4/0.0.0.0" & $tcpIp4Wildcard).get,
         MultiAddress.init("/ip6/::" & $tcpIp6).get,
       ]
     await svc.run(switch)
     check switch.peerInfo.addrs ==
       @[
         MultiAddress.init("/ip4/127.0.0.1" & $tcpIp4Locahost).get,

@@ -83,6 +76,9 @@ suite "WildcardAddressResolverService":
         MultiAddress.init("/ip4/192.168.1.22" & $tcpIp4Wildcard).get,
         MultiAddress.init("/ip6/::1" & $tcpIp6).get,
         MultiAddress.init("/ip6/fe80::1" & $tcpIp6).get,
+        # IPv6 dual stack
+        MultiAddress.init("/ip4/127.0.0.1" & $tcpIp6).get,
+        MultiAddress.init("/ip4/192.168.1.22" & $tcpIp6).get,
       ]
     await switch.stop()
     check switch.peerInfo.addrs ==