mirror of
https://github.com/status-im/nim-libp2p.git
synced 2025-02-23 18:18:11 +00:00
Merge remote-tracking branch 'origin/unstable' into quic
This commit is contained in:
commit
af0a9ac66e
131
.github/actions/install_nim/action.yml
vendored
Normal file
131
.github/actions/install_nim/action.yml
vendored
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
name: Install Nim
|
||||||
|
inputs:
|
||||||
|
os:
|
||||||
|
description: "Operating system to build for"
|
||||||
|
required: true
|
||||||
|
cpu:
|
||||||
|
description: "CPU to build for"
|
||||||
|
default: "amd64"
|
||||||
|
nim_branch:
|
||||||
|
description: "Nim version"
|
||||||
|
default: "version-1-6"
|
||||||
|
shell:
|
||||||
|
description: "Shell to run commands in"
|
||||||
|
default: "bash --noprofile --norc -e -o pipefail"
|
||||||
|
|
||||||
|
runs:
|
||||||
|
using: "composite"
|
||||||
|
steps:
|
||||||
|
- name: Install build dependencies (Linux i386)
|
||||||
|
shell: ${{ inputs.shell }}
|
||||||
|
if: inputs.os == 'Linux' && inputs.cpu == 'i386'
|
||||||
|
run: |
|
||||||
|
sudo dpkg --add-architecture i386
|
||||||
|
sudo apt-get update -qq
|
||||||
|
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
|
||||||
|
--no-install-recommends -yq gcc-multilib g++-multilib \
|
||||||
|
libssl-dev:i386
|
||||||
|
mkdir -p external/bin
|
||||||
|
cat << EOF > external/bin/gcc
|
||||||
|
#!/bin/bash
|
||||||
|
exec $(which gcc) -m32 "\$@"
|
||||||
|
EOF
|
||||||
|
cat << EOF > external/bin/g++
|
||||||
|
#!/bin/bash
|
||||||
|
exec $(which g++) -m32 "\$@"
|
||||||
|
EOF
|
||||||
|
chmod 755 external/bin/gcc external/bin/g++
|
||||||
|
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
|
||||||
|
|
||||||
|
- name: MSYS2 (Windows i386)
|
||||||
|
if: inputs.os == 'Windows' && inputs.cpu == 'i386'
|
||||||
|
uses: msys2/setup-msys2@v2
|
||||||
|
with:
|
||||||
|
path-type: inherit
|
||||||
|
msystem: MINGW32
|
||||||
|
install: >-
|
||||||
|
base-devel
|
||||||
|
git
|
||||||
|
mingw-w64-i686-toolchain
|
||||||
|
|
||||||
|
- name: MSYS2 (Windows amd64)
|
||||||
|
if: inputs.os == 'Windows' && inputs.cpu == 'amd64'
|
||||||
|
uses: msys2/setup-msys2@v2
|
||||||
|
with:
|
||||||
|
path-type: inherit
|
||||||
|
install: >-
|
||||||
|
base-devel
|
||||||
|
git
|
||||||
|
mingw-w64-x86_64-toolchain
|
||||||
|
|
||||||
|
- name: Restore Nim DLLs dependencies (Windows) from cache
|
||||||
|
if: inputs.os == 'Windows'
|
||||||
|
id: windows-dlls-cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: external/dlls
|
||||||
|
key: 'dlls'
|
||||||
|
|
||||||
|
- name: Install DLL dependencies (Windows)
|
||||||
|
shell: ${{ inputs.shell }}
|
||||||
|
if: >
|
||||||
|
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
|
||||||
|
inputs.os == 'Windows'
|
||||||
|
run: |
|
||||||
|
mkdir external
|
||||||
|
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
|
||||||
|
7z x external/windeps.zip -oexternal/dlls
|
||||||
|
|
||||||
|
- name: Path to cached dependencies (Windows)
|
||||||
|
shell: ${{ inputs.shell }}
|
||||||
|
if: >
|
||||||
|
inputs.os == 'Windows'
|
||||||
|
run: |
|
||||||
|
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
|
||||||
|
|
||||||
|
- name: Derive environment variables
|
||||||
|
shell: ${{ inputs.shell }}
|
||||||
|
run: |
|
||||||
|
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
|
||||||
|
PLATFORM=x64
|
||||||
|
else
|
||||||
|
PLATFORM=x86
|
||||||
|
fi
|
||||||
|
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
ncpu=
|
||||||
|
MAKE_CMD="make"
|
||||||
|
case '${{ inputs.os }}' in
|
||||||
|
'Linux')
|
||||||
|
ncpu=$(nproc)
|
||||||
|
;;
|
||||||
|
'macOS')
|
||||||
|
ncpu=$(sysctl -n hw.ncpu)
|
||||||
|
;;
|
||||||
|
'Windows')
|
||||||
|
ncpu=$NUMBER_OF_PROCESSORS
|
||||||
|
MAKE_CMD="mingw32-make"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
|
||||||
|
echo "ncpu=$ncpu" >> $GITHUB_ENV
|
||||||
|
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
|
||||||
|
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
|
||||||
|
|
||||||
|
- name: Restore Nim from cache
|
||||||
|
id: nim-cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: '${{ github.workspace }}/nim'
|
||||||
|
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
|
||||||
|
|
||||||
|
- name: Build Nim and Nimble
|
||||||
|
shell: ${{ inputs.shell }}
|
||||||
|
if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
|
||||||
|
run: |
|
||||||
|
# We don't want partial matches of the cache restored
|
||||||
|
rm -rf nim
|
||||||
|
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
||||||
|
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
|
||||||
|
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
|
||||||
|
bash build_nim.sh nim csources dist/nimble NimBinaries
|
15
.github/workflows/bumper.yml
vendored
15
.github/workflows/bumper.yml
vendored
@ -7,14 +7,21 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
bumpNimbus:
|
bumpProjects:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
target: [
|
||||||
|
{ repo: status-im/nimbus-eth2, branch: unstable },
|
||||||
|
{ repo: status-im/nwaku, branch: master },
|
||||||
|
{ repo: status-im/nim-codex, branch: main }
|
||||||
|
]
|
||||||
steps:
|
steps:
|
||||||
- name: Clone NBC
|
- name: Clone repo
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
repository: status-im/nimbus-eth2
|
repository: ${{ matrix.target.repo }}
|
||||||
ref: unstable
|
ref: ${{ matrix.target.branch }}
|
||||||
path: nbc
|
path: nbc
|
||||||
submodules: true
|
submodules: true
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
126
.github/workflows/ci.yml
vendored
126
.github/workflows/ci.yml
vendored
@ -7,6 +7,10 @@ on:
|
|||||||
pull_request:
|
pull_request:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
timeout-minutes: 90
|
timeout-minutes: 90
|
||||||
@ -45,111 +49,20 @@ jobs:
|
|||||||
|
|
||||||
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
|
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
|
||||||
runs-on: ${{ matrix.builder }}
|
runs-on: ${{ matrix.builder }}
|
||||||
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
|
continue-on-error: ${{ matrix.branch == 'devel' }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
submodules: true
|
submodules: true
|
||||||
|
|
||||||
- name: Install build dependencies (Linux i386)
|
- name: Setup Nim
|
||||||
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
|
uses: "./.github/actions/install_nim"
|
||||||
run: |
|
|
||||||
sudo dpkg --add-architecture i386
|
|
||||||
sudo apt-get update -qq
|
|
||||||
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
|
|
||||||
--no-install-recommends -yq gcc-multilib g++-multilib \
|
|
||||||
libssl-dev:i386
|
|
||||||
mkdir -p external/bin
|
|
||||||
cat << EOF > external/bin/gcc
|
|
||||||
#!/bin/bash
|
|
||||||
exec $(which gcc) -m32 "\$@"
|
|
||||||
EOF
|
|
||||||
cat << EOF > external/bin/g++
|
|
||||||
#!/bin/bash
|
|
||||||
exec $(which g++) -m32 "\$@"
|
|
||||||
EOF
|
|
||||||
chmod 755 external/bin/gcc external/bin/g++
|
|
||||||
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: MSYS2 (Windows i386)
|
|
||||||
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
|
|
||||||
uses: msys2/setup-msys2@v2
|
|
||||||
with:
|
with:
|
||||||
path-type: inherit
|
os: ${{ matrix.target.os }}
|
||||||
msystem: MINGW32
|
cpu: ${{ matrix.target.cpu }}
|
||||||
install: >-
|
shell: ${{ matrix.shell }}
|
||||||
base-devel
|
nim_branch: ${{ matrix.branch }}
|
||||||
git
|
|
||||||
mingw-w64-i686-toolchain
|
|
||||||
|
|
||||||
- name: MSYS2 (Windows amd64)
|
|
||||||
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
|
|
||||||
uses: msys2/setup-msys2@v2
|
|
||||||
with:
|
|
||||||
path-type: inherit
|
|
||||||
install: >-
|
|
||||||
base-devel
|
|
||||||
git
|
|
||||||
mingw-w64-x86_64-toolchain
|
|
||||||
|
|
||||||
- name: Restore Nim DLLs dependencies (Windows) from cache
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
id: windows-dlls-cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: external/dlls
|
|
||||||
key: 'dlls'
|
|
||||||
|
|
||||||
- name: Install DLL dependencies (Windows)
|
|
||||||
if: >
|
|
||||||
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
|
|
||||||
runner.os == 'Windows'
|
|
||||||
run: |
|
|
||||||
mkdir external
|
|
||||||
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
|
|
||||||
7z x external/windeps.zip -oexternal/dlls
|
|
||||||
|
|
||||||
- name: Path to cached dependencies (Windows)
|
|
||||||
if: >
|
|
||||||
runner.os == 'Windows'
|
|
||||||
run: |
|
|
||||||
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: Derive environment variables
|
|
||||||
run: |
|
|
||||||
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
|
|
||||||
PLATFORM=x64
|
|
||||||
else
|
|
||||||
PLATFORM=x86
|
|
||||||
fi
|
|
||||||
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
ncpu=
|
|
||||||
MAKE_CMD="make"
|
|
||||||
case '${{ runner.os }}' in
|
|
||||||
'Linux')
|
|
||||||
ncpu=$(nproc)
|
|
||||||
;;
|
|
||||||
'macOS')
|
|
||||||
ncpu=$(sysctl -n hw.ncpu)
|
|
||||||
;;
|
|
||||||
'Windows')
|
|
||||||
ncpu=$NUMBER_OF_PROCESSORS
|
|
||||||
MAKE_CMD="mingw32-make"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
|
|
||||||
echo "ncpu=$ncpu" >> $GITHUB_ENV
|
|
||||||
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build Nim and Nimble
|
|
||||||
run: |
|
|
||||||
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
|
||||||
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
|
|
||||||
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
|
|
||||||
bash build_nim.sh nim csources dist/nimble NimBinaries
|
|
||||||
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
@ -160,13 +73,20 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
|
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
|
||||||
|
|
||||||
|
- name: Restore deps from cache
|
||||||
|
id: deps-cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: nimbledeps
|
||||||
|
key: nimbledeps-${{ hashFiles('.pinned') }}
|
||||||
|
|
||||||
|
- name: Install deps
|
||||||
|
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
|
||||||
|
run: |
|
||||||
|
nimble install_pinned
|
||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: |
|
run: |
|
||||||
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
|
|
||||||
# https://github.com/status-im/nimbus-eth2/issues/3121
|
|
||||||
export NIMFLAGS="-d:nimRawSetjmp"
|
|
||||||
fi
|
|
||||||
nim --version
|
nim --version
|
||||||
nimble --version
|
nimble --version
|
||||||
nimble install_pinned
|
|
||||||
nimble test
|
nimble test
|
||||||
|
151
.github/workflows/codecov.yml
vendored
151
.github/workflows/codecov.yml
vendored
@ -5,136 +5,61 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- master
|
- master
|
||||||
|
- unstable
|
||||||
pull_request:
|
pull_request:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
GossipSub:
|
Coverage:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
strategy:
|
env:
|
||||||
matrix:
|
CICOV: YES
|
||||||
nim-options: [
|
|
||||||
"",
|
|
||||||
"-d:libp2p_pubsub_anonymize=true -d:libp2p_pubsub_sign=false -d:libp2p_pubsub_verify=false",
|
|
||||||
"-d:libp2p_pubsub_sign=true -d:libp2p_pubsub_verify=true"
|
|
||||||
]
|
|
||||||
test-program: [
|
|
||||||
"tests/pubsub/testpubsub",
|
|
||||||
"tests/pubsub/testfloodsub",
|
|
||||||
"tests/pubsub/testgossipinternal"
|
|
||||||
]
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Nim
|
||||||
|
uses: "./.github/actions/install_nim"
|
||||||
|
with:
|
||||||
|
os: linux
|
||||||
|
cpu: amd64
|
||||||
|
shell: bash
|
||||||
|
|
||||||
|
- name: Restore deps from cache
|
||||||
|
id: deps-cache
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: nimbledeps
|
||||||
|
key: nimbledeps-${{ hashFiles('.pinned') }}
|
||||||
|
|
||||||
|
- name: Install deps
|
||||||
|
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
|
||||||
|
run: |
|
||||||
|
nimble install_pinned
|
||||||
|
|
||||||
- name: Run
|
- name: Run
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install -y lcov build-essential git curl
|
sudo apt-get install -y lcov build-essential git curl
|
||||||
mkdir coverage
|
mkdir coverage
|
||||||
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
|
||||||
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
|
nimble testnative
|
||||||
export PATH="$PATH:$PWD/Nim/bin"
|
nimble testpubsub
|
||||||
nimble install_pinned
|
nimble testfilter
|
||||||
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
|
find nimcache -name *.c -delete
|
||||||
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
|
|
||||||
cd nimcache; rm *.c; cd ..
|
|
||||||
lcov --capture --directory nimcache --output-file coverage/coverage.info
|
lcov --capture --directory nimcache --output-file coverage/coverage.info
|
||||||
shopt -s globstar
|
shopt -s globstar
|
||||||
ls `pwd`/libp2p/{*,**/*}.nim
|
ls `pwd`/libp2p/{*,**/*}.nim
|
||||||
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
|
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
|
||||||
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
|
genhtml coverage/coverage.f.info --output-directory coverage/output
|
||||||
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
|
|
||||||
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
|
|
||||||
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
|
|
||||||
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
|
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
|
||||||
- uses: actions/upload-artifact@master
|
|
||||||
with:
|
|
||||||
name: coverage
|
|
||||||
path: coverage
|
|
||||||
|
|
||||||
Tests:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
nim-options: [
|
|
||||||
""
|
|
||||||
]
|
|
||||||
test-program: [
|
|
||||||
"tests/testnative",
|
|
||||||
]
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Run
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y lcov build-essential git curl
|
|
||||||
mkdir coverage
|
|
||||||
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
|
||||||
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
|
|
||||||
export PATH="$PATH:$PWD/Nim/bin"
|
|
||||||
nimble install_pinned
|
|
||||||
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }} --clearNimblePath --NimblePath:nimbledeps/pkgs"
|
|
||||||
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
|
|
||||||
cd nimcache; rm *.c; cd ..
|
|
||||||
lcov --capture --directory nimcache --output-file coverage/coverage.info
|
|
||||||
shopt -s globstar
|
|
||||||
ls `pwd`/libp2p/{*,**/*}.nim
|
|
||||||
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
|
|
||||||
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
|
|
||||||
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
|
|
||||||
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
|
|
||||||
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
|
|
||||||
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
|
|
||||||
- uses: actions/upload-artifact@master
|
|
||||||
with:
|
|
||||||
name: coverage
|
|
||||||
path: coverage
|
|
||||||
|
|
||||||
Filter:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
nim-options: [
|
|
||||||
"",
|
|
||||||
"-d:libp2p_pki_schemes=secp256k1",
|
|
||||||
"-d:libp2p_pki_schemes=secp256k1;ed25519",
|
|
||||||
"-d:libp2p_pki_schemes=secp256k1;ed25519;ecnist",
|
|
||||||
]
|
|
||||||
test-program: [
|
|
||||||
"tests/testpkifilter",
|
|
||||||
]
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
- name: Run
|
|
||||||
run: |
|
|
||||||
sudo apt-get update
|
|
||||||
sudo apt-get install -y lcov build-essential git curl
|
|
||||||
mkdir coverage
|
|
||||||
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
|
||||||
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
|
|
||||||
export PATH="$PATH:$PWD/Nim/bin"
|
|
||||||
nimble install_pinned
|
|
||||||
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
|
|
||||||
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
|
|
||||||
cd nimcache; rm *.c; cd ..
|
|
||||||
lcov --capture --directory nimcache --output-file coverage/coverage.info
|
|
||||||
shopt -s globstar
|
|
||||||
ls `pwd`/libp2p/{*,**/*}.nim
|
|
||||||
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
|
|
||||||
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
|
|
||||||
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
|
|
||||||
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
|
|
||||||
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
|
|
||||||
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
|
|
||||||
- uses: actions/upload-artifact@master
|
|
||||||
with:
|
|
||||||
name: coverage
|
|
||||||
path: coverage
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#- uses: actions/upload-artifact@master
|
||||||
|
# with:
|
||||||
|
# name: coverage
|
||||||
|
# path: coverage
|
||||||
|
8
.github/workflows/doc.yml
vendored
8
.github/workflows/doc.yml
vendored
@ -63,7 +63,7 @@ jobs:
|
|||||||
git push origin gh-pages
|
git push origin gh-pages
|
||||||
|
|
||||||
update_site:
|
update_site:
|
||||||
if: github.ref == 'refs/heads/master'
|
if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/docs'
|
||||||
name: 'Rebuild website'
|
name: 'Rebuild website'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
@ -74,8 +74,12 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
python-version: 3.x
|
python-version: 3.x
|
||||||
|
|
||||||
|
- uses: jiro4989/setup-nim-action@v1
|
||||||
|
with:
|
||||||
|
nim-version: 'stable'
|
||||||
|
|
||||||
- name: Generate website
|
- name: Generate website
|
||||||
run: pip install mkdocs-material && mkdocs build
|
run: pip install mkdocs-material && nimble website
|
||||||
|
|
||||||
- name: Clone the gh-pages branch
|
- name: Clone the gh-pages branch
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
129
.github/workflows/multi_nim.yml
vendored
129
.github/workflows/multi_nim.yml
vendored
@ -5,7 +5,13 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
delete-cache:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: snnaplab/delete-branch-cache-action@v1
|
||||||
|
|
||||||
build:
|
build:
|
||||||
|
needs: delete-cache
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
@ -21,7 +27,7 @@ jobs:
|
|||||||
cpu: amd64
|
cpu: amd64
|
||||||
#- os: windows
|
#- os: windows
|
||||||
#cpu: i386
|
#cpu: i386
|
||||||
branch: [version-1-2, version-1-4, version-1-6, devel]
|
branch: [version-1-2, version-1-6, devel]
|
||||||
include:
|
include:
|
||||||
- target:
|
- target:
|
||||||
os: linux
|
os: linux
|
||||||
@ -42,112 +48,18 @@ jobs:
|
|||||||
|
|
||||||
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
|
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
|
||||||
runs-on: ${{ matrix.builder }}
|
runs-on: ${{ matrix.builder }}
|
||||||
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
|
continue-on-error: ${{ matrix.branch == 'devel' }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Setup Nim
|
||||||
|
uses: "./.github/actions/install_nim"
|
||||||
with:
|
with:
|
||||||
ref: unstable
|
os: ${{ matrix.target.os }}
|
||||||
submodules: true
|
shell: ${{ matrix.shell }}
|
||||||
|
nim_branch: ${{ matrix.branch }}
|
||||||
- name: Install build dependencies (Linux i386)
|
cpu: ${{ matrix.target.cpu }}
|
||||||
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
|
|
||||||
run: |
|
|
||||||
sudo dpkg --add-architecture i386
|
|
||||||
sudo apt-get update -qq
|
|
||||||
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
|
|
||||||
--no-install-recommends -yq gcc-multilib g++-multilib \
|
|
||||||
libssl-dev:i386
|
|
||||||
mkdir -p external/bin
|
|
||||||
cat << EOF > external/bin/gcc
|
|
||||||
#!/bin/bash
|
|
||||||
exec $(which gcc) -m32 "\$@"
|
|
||||||
EOF
|
|
||||||
cat << EOF > external/bin/g++
|
|
||||||
#!/bin/bash
|
|
||||||
exec $(which g++) -m32 "\$@"
|
|
||||||
EOF
|
|
||||||
chmod 755 external/bin/gcc external/bin/g++
|
|
||||||
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: MSYS2 (Windows i386)
|
|
||||||
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
|
|
||||||
uses: msys2/setup-msys2@v2
|
|
||||||
with:
|
|
||||||
path-type: inherit
|
|
||||||
msystem: MINGW32
|
|
||||||
install: >-
|
|
||||||
base-devel
|
|
||||||
git
|
|
||||||
mingw-w64-i686-toolchain
|
|
||||||
|
|
||||||
- name: MSYS2 (Windows amd64)
|
|
||||||
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
|
|
||||||
uses: msys2/setup-msys2@v2
|
|
||||||
with:
|
|
||||||
path-type: inherit
|
|
||||||
install: >-
|
|
||||||
base-devel
|
|
||||||
git
|
|
||||||
mingw-w64-x86_64-toolchain
|
|
||||||
|
|
||||||
- name: Restore Nim DLLs dependencies (Windows) from cache
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
id: windows-dlls-cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: external/dlls
|
|
||||||
key: 'dlls'
|
|
||||||
|
|
||||||
- name: Install DLL dependencies (Windows)
|
|
||||||
if: >
|
|
||||||
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
|
|
||||||
runner.os == 'Windows'
|
|
||||||
run: |
|
|
||||||
mkdir external
|
|
||||||
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
|
|
||||||
7z x external/windeps.zip -oexternal/dlls
|
|
||||||
|
|
||||||
- name: Path to cached dependencies (Windows)
|
|
||||||
if: >
|
|
||||||
runner.os == 'Windows'
|
|
||||||
run: |
|
|
||||||
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: Derive environment variables
|
|
||||||
run: |
|
|
||||||
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
|
|
||||||
PLATFORM=x64
|
|
||||||
else
|
|
||||||
PLATFORM=x86
|
|
||||||
fi
|
|
||||||
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
ncpu=
|
|
||||||
MAKE_CMD="make"
|
|
||||||
case '${{ runner.os }}' in
|
|
||||||
'Linux')
|
|
||||||
ncpu=$(nproc)
|
|
||||||
;;
|
|
||||||
'macOS')
|
|
||||||
ncpu=$(sysctl -n hw.ncpu)
|
|
||||||
;;
|
|
||||||
'Windows')
|
|
||||||
ncpu=$NUMBER_OF_PROCESSORS
|
|
||||||
MAKE_CMD="mingw32-make"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
|
|
||||||
echo "ncpu=$ncpu" >> $GITHUB_ENV
|
|
||||||
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Build Nim and Nimble
|
|
||||||
run: |
|
|
||||||
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
|
|
||||||
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
|
|
||||||
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
|
|
||||||
bash build_nim.sh nim csources dist/nimble NimBinaries
|
|
||||||
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
|
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
@ -160,16 +72,11 @@ jobs:
|
|||||||
|
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: |
|
run: |
|
||||||
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
|
|
||||||
# https://github.com/status-im/nimbus-eth2/issues/3121
|
|
||||||
export NIMFLAGS="-d:nimRawSetjmp"
|
|
||||||
fi
|
|
||||||
nim --version
|
nim --version
|
||||||
nimble --version
|
nimble --version
|
||||||
nimble install -y --depsOnly
|
nimble install -y --depsOnly
|
||||||
nimble test
|
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
|
||||||
if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
|
if [[ "${{ matrix.branch }}" == "devel" ]]; then
|
||||||
echo -e "\nTesting with '--gc:orc':\n"
|
echo -e "\nTesting with '--gc:orc':\n"
|
||||||
export NIMFLAGS="${NIMFLAGS} --gc:orc"
|
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
|
||||||
nimble test
|
|
||||||
fi
|
fi
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -13,5 +13,6 @@ build/
|
|||||||
.vscode/
|
.vscode/
|
||||||
.DS_Store
|
.DS_Store
|
||||||
tests/pubsub/testgossipsub
|
tests/pubsub/testgossipsub
|
||||||
|
examples/*.md
|
||||||
nimble.develop
|
nimble.develop
|
||||||
nimble.paths
|
nimble.paths
|
||||||
|
26
.pinned
26
.pinned
@ -1,19 +1,19 @@
|
|||||||
bearssl;https://github.com/status-im/nim-bearssl@#f4c4233de453cb7eac0ce3f3ffad6496295f83ab
|
bearssl;https://github.com/status-im/nim-bearssl@#a647994910904b0103a05db3a5ec1ecfc4d91a88
|
||||||
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
|
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
|
||||||
chronos;https://github.com/status-im/nim-chronos@#1334cdfebdc6182ff752e7d20796d9936cc8faa3
|
chronos;https://github.com/status-im/nim-chronos@#e9f8baa6ee2e21ff8e6b6c0ce0c22368cdd9e758
|
||||||
dnsclient;https://github.com/ba0f3/dnsclient.nim@#4960de2b345f567b12f09a08e9967af104ab39a3
|
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
|
||||||
faststreams;https://github.com/status-im/nim-faststreams@#49e2c52eb5dda46b1c9c10d079abe7bffe6cea89
|
faststreams;https://github.com/status-im/nim-faststreams@#b42daf41d8eb4fbce40add6836bed838f8d85b6f
|
||||||
httputils;https://github.com/status-im/nim-http-utils@#e88e231dfcef4585fe3b2fbd9b664dbd28a88040
|
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
|
||||||
json_serialization;https://github.com/status-im/nim-json-serialization@#e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4
|
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
|
||||||
metrics;https://github.com/status-im/nim-metrics@#0a6477268e850d7bc98347b3875301524871765f
|
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
|
||||||
ngtcp2;https://github.com/status-im/nim-ngtcp2@#76bf92475f55728ff55a2a19b45a5fcbb4faa2ab
|
ngtcp2;https://github.com/status-im/nim-ngtcp2@#76bf92475f55728ff55a2a19b45a5fcbb4faa2ab
|
||||||
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
|
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
|
||||||
quic;https://github.com/status-im/nim-quic.git@#26b1c76f8574883e385f8650090a74d33cc9d0fa
|
quic;https://github.com/status-im/nim-quic.git@#9dd9b528cfda50f6482f800175d7350cb03911ff
|
||||||
secp256k1;https://github.com/status-im/nim-secp256k1@#c7f1a37d9b0f17292649bfed8bf6cef83cf4221f
|
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
|
||||||
serialization;https://github.com/status-im/nim-serialization@#493d18b8292fc03aa4f835fd825dea1183f97466
|
serialization;https://github.com/status-im/nim-serialization@#d77417cba6896c26287a68e6a95762e45a1b87e5
|
||||||
stew;https://github.com/status-im/nim-stew@#018760954a1530b7336aed7133393908875d860f
|
stew;https://github.com/status-im/nim-stew@#f2f9685ec904868bbb48485e72ddc026ed51b230
|
||||||
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
|
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
|
||||||
unittest2;https://github.com/status-im/nim-unittest2@#f180f596c88dfd266f746ed6f8dbebce39c824db
|
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
|
||||||
upraises;https://github.com/markspanbroek/upraises@#d9f268db1021959fe0f2c7a5e49fba741f9932a0
|
upraises;https://github.com/markspanbroek/upraises@#d9f268db1021959fe0f2c7a5e49fba741f9932a0
|
||||||
websock;https://github.com/status-im/nim-websock@#af8779d9d95e488ec9fd2d584b6328bd506c702b
|
websock;https://github.com/status-im/nim-websock@#691f069b209d372b1240d5ae1f57fb7bbafeaba7
|
||||||
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8
|
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8
|
87
README.md
87
README.md
@ -2,7 +2,7 @@
|
|||||||
<a href="https://libp2p.io"><img width="250" src="./.assets/full-logo.svg?raw=true" alt="nim-libp2p logo" /></a>
|
<a href="https://libp2p.io"><img width="250" src="./.assets/full-logo.svg?raw=true" alt="nim-libp2p logo" /></a>
|
||||||
</h1>
|
</h1>
|
||||||
|
|
||||||
<h3 align="center">The Nim implementation of the libp2p Networking Stack.</h3>
|
<h3 align="center">The <a href="https://nim-lang.org/">Nim</a> implementation of the <a href="https://libp2p.io/">libp2p</a> Networking Stack.</h3>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
|
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
|
||||||
@ -16,30 +16,26 @@
|
|||||||
<img src="https://img.shields.io/badge/nim-%3E%3D1.2.0-orange.svg?style=flat-square" />
|
<img src="https://img.shields.io/badge/nim-%3E%3D1.2.0-orange.svg?style=flat-square" />
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
An implementation of [libp2p](https://libp2p.io/) in [Nim](https://nim-lang.org/).
|
|
||||||
|
|
||||||
# Table of Contents
|
# Table of Contents
|
||||||
- [Background](#background)
|
- [Background](#background)
|
||||||
- [Install](#install)
|
- [Install](#install)
|
||||||
- [Getting Started](#getting-started)
|
- [Getting Started](#getting-started)
|
||||||
- [Modules](#modules)
|
- [Modules](#modules)
|
||||||
- [Users](#users)
|
- [Users](#users)
|
||||||
|
- [Stability](#stability)
|
||||||
- [Development](#development)
|
- [Development](#development)
|
||||||
- [Contribute](#contribute)
|
- [Contribute](#contribute)
|
||||||
- [Core Developers](#core-developers)
|
- [Contributors](#contributors)
|
||||||
|
- [Core Maintainers](#core-maintainers)
|
||||||
- [License](#license)
|
- [License](#license)
|
||||||
|
|
||||||
## Background
|
## Background
|
||||||
libp2p is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
|
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
|
||||||
|
|
||||||
libp2p is the product of a long and arduous quest of understanding; a deep dive into the internet's network stack and the peer-to-peer protocols from the past. Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It is a "network stack", a suite of networking protocols that cleanly separates concerns and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability.
|
Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
|
||||||
|
This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
|
||||||
|
|
||||||
libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.
|
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
|
||||||
|
|
||||||
- Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow our evolving documentation efforts at [**docs.libp2p.io**](https://docs.libp2p.io).
|
|
||||||
- [Here](https://github.com/libp2p/libp2p#description) is an overview of libp2p and its implementations in other programming languages.
|
|
||||||
|
|
||||||
## Install
|
## Install
|
||||||
**Prerequisite**
|
**Prerequisite**
|
||||||
@ -49,7 +45,7 @@ nimble install libp2p
|
|||||||
```
|
```
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
You'll find the documentation [here].(https://status-im.github.io/nim-libp2p/docs/)
|
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
|
||||||
|
|
||||||
**Go Daemon:**
|
**Go Daemon:**
|
||||||
Please find the installation and usage intructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
|
Please find the installation and usage intructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
|
||||||
@ -63,25 +59,28 @@ List of packages modules implemented in nim-libp2p:
|
|||||||
| **Libp2p** | |
|
| **Libp2p** | |
|
||||||
| [libp2p](libp2p/switch.nim) | The core of the project |
|
| [libp2p](libp2p/switch.nim) | The core of the project |
|
||||||
| [connmanager](libp2p/connmanager.nim) | Connection manager |
|
| [connmanager](libp2p/connmanager.nim) | Connection manager |
|
||||||
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/protocols/#identify) protocol |
|
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
|
||||||
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol |
|
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
|
||||||
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
|
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
|
||||||
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
|
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
|
||||||
| **Transports** | |
|
| **Transports** | |
|
||||||
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
|
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
|
||||||
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
|
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
|
||||||
|
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
|
||||||
| **Secure Channels** | |
|
| **Secure Channels** | |
|
||||||
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | [Secio](https://docs.libp2p.io/concepts/protocols/#secio) secure channel |
|
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
|
||||||
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://github.com/libp2p/specs/tree/master/noise) secure channel |
|
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
|
||||||
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | [Plain Text](https://github.com/libp2p/specs/tree/master/plaintext) for development purposes |
|
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
|
||||||
| **Stream Multiplexers** | |
|
| **Stream Multiplexers** | |
|
||||||
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
|
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
|
||||||
|
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
|
||||||
| **Data Types** | |
|
| **Data Types** | |
|
||||||
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/peer-id/) |
|
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
|
||||||
| [peer-store](libp2p/peerstore.nim) | ["Phone book" of known peers](https://docs.libp2p.io/concepts/peer-id/#peerinfo) |
|
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
|
||||||
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
|
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
|
||||||
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
|
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
|
||||||
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
|
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
|
||||||
|
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
|
||||||
| **Utilities** | |
|
| **Utilities** | |
|
||||||
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
|
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
|
||||||
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
|
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
|
||||||
@ -98,20 +97,34 @@ nim-libp2p is used by:
|
|||||||
- [nim-codex](https://github.com/status-im/nim-codex), a decentralized storage application
|
- [nim-codex](https://github.com/status-im/nim-codex), a decentralized storage application
|
||||||
- (open a pull request if you want to be included here)
|
- (open a pull request if you want to be included here)
|
||||||
|
|
||||||
## Development
|
## Stability
|
||||||
**Clone and Install dependencies:**
|
nim-libp2p has been used in production for over a year in high-stake scenarios, so its core is considered stable.
|
||||||
|
Some modules are more recent and less stable.
|
||||||
|
|
||||||
|
The versioning follows [semver](https://semver.org/), with some additions:
|
||||||
|
- Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
|
||||||
|
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
|
||||||
|
|
||||||
|
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
|
||||||
|
|
||||||
|
## Development
|
||||||
|
Clone and Install dependencies:
|
||||||
```sh
|
```sh
|
||||||
git clone https://github.com/status-im/nim-libp2p
|
git clone https://github.com/status-im/nim-libp2p
|
||||||
cd nim-libp2p
|
cd nim-libp2p
|
||||||
nimble install
|
# to use dependencies computed by nimble
|
||||||
|
nimble install -dy
|
||||||
|
# OR to install the dependencies versions used in CI
|
||||||
|
nimble install_pinned
|
||||||
```
|
```
|
||||||
|
|
||||||
**Run unit tests**
|
Run unit tests:
|
||||||
```sh
|
```sh
|
||||||
# run all the unit tests
|
# run all the unit tests
|
||||||
nimble test
|
nimble test
|
||||||
```
|
```
|
||||||
|
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
|
||||||
|
Or use `nimble tasks` to show all available tasks.
|
||||||
|
|
||||||
### Contribute
|
### Contribute
|
||||||
|
|
||||||
@ -122,25 +135,33 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
|
|||||||
|
|
||||||
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
|
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
|
||||||
|
|
||||||
### Core Developers
|
### Contributors
|
||||||
[@cheatfate](https://github.com/cheatfate), [Dmitriy Ryajov](https://github.com/dryajov), [Tanguy](https://github.com/Menduist), [Zahary Karadjov](https://github.com/zah)
|
<a href="https://github.com/status-im/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=status-im/nim-libp2p" alt="nim-libp2p contributors"></a>
|
||||||
|
|
||||||
### Tips and tricks
|
### Core Maintainers
|
||||||
|
<table>
|
||||||
|
<tbody>
|
||||||
|
<tr>
|
||||||
|
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
|
||||||
|
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
|
||||||
|
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
|
||||||
**enable expensive metrics:**
|
### Compile time flags
|
||||||
|
|
||||||
|
Enable expensive metrics (ie, metrics with per-peer cardinality):
|
||||||
```bash
|
```bash
|
||||||
nim c -d:libp2p_expensive_metrics some_file.nim
|
nim c -d:libp2p_expensive_metrics some_file.nim
|
||||||
```
|
```
|
||||||
|
|
||||||
**use identify metrics**
|
Set list of known libp2p agents for metrics:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,prysm,teku some_file.nim
|
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
|
||||||
```
|
```
|
||||||
|
|
||||||
**specify gossipsub specific topics to measure**
|
Specify gossipsub specific topics to measure in the metrics:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
|
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
|
||||||
```
|
```
|
||||||
|
@ -1,14 +1,8 @@
|
|||||||
codecov:
|
codecov:
|
||||||
notify:
|
notify:
|
||||||
require_ci_to_pass: true
|
require_ci_to_pass: true
|
||||||
# must be the number of coverage report builds
|
|
||||||
# notice that this number is for PRs;
|
|
||||||
# like this we disabled notify on pure branches report
|
|
||||||
# which is fine I guess
|
|
||||||
after_n_builds: 28
|
|
||||||
comment:
|
comment:
|
||||||
layout: "reach, diff, flags, files"
|
layout: "reach, diff, flags, files"
|
||||||
after_n_builds: 28 # must be the number of coverage report builds
|
|
||||||
coverage:
|
coverage:
|
||||||
status:
|
status:
|
||||||
project:
|
project:
|
||||||
|
15
config.nims
15
config.nims
@ -2,6 +2,21 @@
|
|||||||
if dirExists("nimbledeps/pkgs"):
|
if dirExists("nimbledeps/pkgs"):
|
||||||
switch("NimblePath", "nimbledeps/pkgs")
|
switch("NimblePath", "nimbledeps/pkgs")
|
||||||
|
|
||||||
|
switch("warning", "CaseTransition:off")
|
||||||
|
switch("warning", "ObservableStores:off")
|
||||||
|
switch("warning", "LockLevel:off")
|
||||||
|
--define:chronosStrictException
|
||||||
|
--styleCheck:usages
|
||||||
|
if (NimMajor, NimMinor) < (1, 6):
|
||||||
|
--styleCheck:hint
|
||||||
|
else:
|
||||||
|
--styleCheck:error
|
||||||
|
|
||||||
|
# Avoid some rare stack corruption while using exceptions with a SEH-enabled
|
||||||
|
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
|
||||||
|
if defined(windows) and not defined(vcc):
|
||||||
|
--define:nimRawSetjmp
|
||||||
|
|
||||||
# begin Nimble config (version 1)
|
# begin Nimble config (version 1)
|
||||||
when fileExists("nimble.paths"):
|
when fileExists("nimble.paths"):
|
||||||
include "nimble.paths"
|
include "nimble.paths"
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
# nim-libp2p documentation
|
# nim-libp2p examples
|
||||||
|
|
||||||
Welcome to the nim-libp2p documentation!
|
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
|
||||||
|
|
||||||
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as [examples](directchat.nim) and
|
We recommand to follow the tutorials on the website, but feel free to grok the sources here!
|
||||||
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
|
|
||||||
|
@ -1,6 +1,14 @@
|
|||||||
|
## # Circuit Relay example
|
||||||
|
##
|
||||||
|
## Circuit Relay can be used when a node cannot reach another node
|
||||||
|
## directly, but can reach it through a another node (the Relay).
|
||||||
|
##
|
||||||
|
## That may happen because of NAT, Firewalls, or incompatible transports.
|
||||||
|
##
|
||||||
|
## More informations [here](https://docs.libp2p.io/concepts/circuit-relay/).
|
||||||
import chronos, stew/byteutils
|
import chronos, stew/byteutils
|
||||||
import ../libp2p,
|
import libp2p,
|
||||||
../libp2p/protocols/relay/[relay, client]
|
libp2p/protocols/connectivity/relay/[relay, client]
|
||||||
|
|
||||||
# Helper to create a circuit relay node
|
# Helper to create a circuit relay node
|
||||||
proc createCircuitRelaySwitch(r: Relay): Switch =
|
proc createCircuitRelaySwitch(r: Relay): Switch =
|
||||||
@ -40,19 +48,19 @@ proc main() {.async.} =
|
|||||||
swSrc = createCircuitRelaySwitch(clSrc)
|
swSrc = createCircuitRelaySwitch(clSrc)
|
||||||
swDst = createCircuitRelaySwitch(clDst)
|
swDst = createCircuitRelaySwitch(clDst)
|
||||||
|
|
||||||
# Create a relay address to swDst using swRel as the relay
|
|
||||||
addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
|
|
||||||
$swRel.peerInfo.peerId & "/p2p-circuit/p2p/" &
|
|
||||||
$swDst.peerInfo.peerId).get()
|
|
||||||
|
|
||||||
swDst.mount(proto)
|
swDst.mount(proto)
|
||||||
|
|
||||||
await swRel.start()
|
await swRel.start()
|
||||||
await swSrc.start()
|
await swSrc.start()
|
||||||
await swDst.start()
|
await swDst.start()
|
||||||
|
|
||||||
# Connect both Src and Dst to the relay, but not to each other.
|
let
|
||||||
await swSrc.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
|
# Create a relay address to swDst using swRel as the relay
|
||||||
|
addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
|
||||||
|
$swRel.peerInfo.peerId & "/p2p-circuit/p2p/" &
|
||||||
|
$swDst.peerInfo.peerId).get()
|
||||||
|
|
||||||
|
# Connect Dst to the relay
|
||||||
await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
|
await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
|
||||||
|
|
||||||
# Dst reserve a slot on the relay.
|
# Dst reserve a slot on the relay.
|
||||||
|
@ -5,7 +5,7 @@ import
|
|||||||
strformat, strutils,
|
strformat, strutils,
|
||||||
stew/byteutils,
|
stew/byteutils,
|
||||||
chronos,
|
chronos,
|
||||||
../libp2p
|
libp2p
|
||||||
|
|
||||||
const DefaultAddr = "/ip4/127.0.0.1/tcp/0"
|
const DefaultAddr = "/ip4/127.0.0.1/tcp/0"
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
import chronos # an efficient library for async
|
import chronos # an efficient library for async
|
||||||
import stew/byteutils # various utils
|
import stew/byteutils # various utils
|
||||||
import ../libp2p # when installed through nimble, just use `import libp2p`
|
import libp2p
|
||||||
|
|
||||||
##
|
##
|
||||||
# Create our custom protocol
|
# Create our custom protocol
|
||||||
|
6
examples/index.md
Normal file
6
examples/index.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# nim-libp2p documentation
|
||||||
|
|
||||||
|
Welcome to the nim-libp2p documentation!
|
||||||
|
|
||||||
|
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
|
||||||
|
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
|
@ -1,108 +0,0 @@
|
|||||||
# Simple ping tutorial
|
|
||||||
|
|
||||||
Hi all, welcome to the first nim-libp2p tutorial!
|
|
||||||
|
|
||||||
!!! tips ""
|
|
||||||
This tutorial is for everyone who is interested in building peer-to-peer applications. No Nim programming experience is needed.
|
|
||||||
|
|
||||||
To give you a quick overview, **Nim** is the programming language we are using and **nim-libp2p** is the Nim implementation of [libp2p](https://libp2p.io/), a modular library that enables the development of peer-to-peer network applications.
|
|
||||||
|
|
||||||
Hope you'll find it helpful in your journey of learning. Happy coding! ;)
|
|
||||||
|
|
||||||
## Before you start
|
|
||||||
The only prerequisite here is [Nim](https://nim-lang.org/), the programming language with a Python-like syntax and a performance similar to C. Detailed information can be found [here](https://nim-lang.org/docs/tut1.html).
|
|
||||||
|
|
||||||
Install Nim via their [official website](https://nim-lang.org/install.html).
|
|
||||||
Check Nim's installation via `nim --version` and its package manager Nimble via `nimble --version`.
|
|
||||||
|
|
||||||
You can now install the latest version of `nim-libp2p`:
|
|
||||||
```bash
|
|
||||||
nimble install libp2p@#master
|
|
||||||
```
|
|
||||||
|
|
||||||
## A simple ping application
|
|
||||||
We'll start by creating a simple application, which is starting two libp2p [switch](https://docs.libp2p.io/concepts/stream-multiplexing/#switch-swarm), and pinging each other using the [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol.
|
|
||||||
|
|
||||||
!!! tips ""
|
|
||||||
You can extract the code from this tutorial by running `nim c -r tools/markdown_runner.nim examples/tutorial_1_connect.md` in the libp2p folder!
|
|
||||||
|
|
||||||
Let's create a `part1.nim`, and import our dependencies:
|
|
||||||
```nim
|
|
||||||
import chronos
|
|
||||||
|
|
||||||
import libp2p
|
|
||||||
import libp2p/protocols/ping
|
|
||||||
```
|
|
||||||
[chronos](https://github.com/status-im/nim-chronos) the asynchronous framework used by `nim-libp2p`
|
|
||||||
|
|
||||||
Next, we'll create an helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
|
|
||||||
```nim
|
|
||||||
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
|
|
||||||
var switch = SwitchBuilder
|
|
||||||
.new()
|
|
||||||
.withRng(rng) # Give the application RNG
|
|
||||||
.withAddress(ma) # Our local address(es)
|
|
||||||
.withTcpTransport() # Use TCP as transport
|
|
||||||
.withMplex() # Use Mplex as muxer
|
|
||||||
.withNoise() # Use Noise as secure manager
|
|
||||||
.build()
|
|
||||||
|
|
||||||
return switch
|
|
||||||
```
|
|
||||||
This will create a switch using [Mplex](https://docs.libp2p.io/concepts/stream-multiplexing/) as a multiplexer, Noise to secure the communication, and TCP as an underlying transport.
|
|
||||||
|
|
||||||
You can of course tweak this, to use a different or multiple transport, or tweak the configuration of Mplex and Noise, but this is some sane defaults that we'll use going forward.
|
|
||||||
|
|
||||||
|
|
||||||
Let's now start to create our main procedure:
|
|
||||||
```nim
|
|
||||||
proc main() {.async, gcsafe.} =
|
|
||||||
let
|
|
||||||
rng = newRng()
|
|
||||||
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
|
||||||
pingProtocol = Ping.new(rng=rng)
|
|
||||||
```
|
|
||||||
We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
|
|
||||||
The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
|
|
||||||
|
|
||||||
`tryGet` is procedure which is part of [nim-result](https://github.com/arnetheduck/nim-result/), that will throw an exception if the supplied MultiAddress is invalid.
|
|
||||||
|
|
||||||
We can now create our two switches:
|
|
||||||
```nim
|
|
||||||
let
|
|
||||||
switch1 = createSwitch(localAddress, rng)
|
|
||||||
switch2 = createSwitch(localAddress, rng)
|
|
||||||
|
|
||||||
switch1.mount(pingProtocol)
|
|
||||||
|
|
||||||
await switch1.start()
|
|
||||||
await switch2.start()
|
|
||||||
```
|
|
||||||
We've **mounted** the `pingProtocol` on our first switch. This means that the first switch will actually listen for any ping requests coming in, and handle them accordingly.
|
|
||||||
|
|
||||||
Now that we've started the nodes, they are listening for incoming peers.
|
|
||||||
We can find out which port was attributed, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
|
|
||||||
|
|
||||||
We'll **dial** the first switch from the second one, by specifying it's **Peer ID**, it's **MultiAddress** and the **`Ping` protocol codec**:
|
|
||||||
```nim
|
|
||||||
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
|
|
||||||
```
|
|
||||||
We now have a `Ping` connection setup between the second and the first switch, we can use it to actually ping the node:
|
|
||||||
```nim
|
|
||||||
# ping the other node and echo the ping duration
|
|
||||||
echo "ping: ", await pingProtocol.ping(conn)
|
|
||||||
|
|
||||||
# We must close the connection ourselves when we're done with it
|
|
||||||
await conn.close()
|
|
||||||
```
|
|
||||||
|
|
||||||
And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
|
|
||||||
```nim
|
|
||||||
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
|
|
||||||
|
|
||||||
waitFor(main())
|
|
||||||
```
|
|
||||||
|
|
||||||
You can now run this program using `nim c -r part1.nim`, and you should see the dialing sequence, ending with a ping output.
|
|
||||||
|
|
||||||
In the [next tutorial](tutorial_2_customproto.md), we'll look at how to create our own custom protocol.
|
|
95
examples/tutorial_1_connect.nim
Normal file
95
examples/tutorial_1_connect.nim
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
## # Simple ping tutorial
|
||||||
|
##
|
||||||
|
## Hi all, welcome to the first nim-libp2p tutorial!
|
||||||
|
##
|
||||||
|
## !!! tips ""
|
||||||
|
## This tutorial is for everyone who is interested in building peer-to-peer applications. No Nim programming experience is needed.
|
||||||
|
##
|
||||||
|
## To give you a quick overview, **Nim** is the programming language we are using and **nim-libp2p** is the Nim implementation of [libp2p](https://libp2p.io/), a modular library that enables the development of peer-to-peer network applications.
|
||||||
|
##
|
||||||
|
## Hope you'll find it helpful in your journey of learning. Happy coding! ;)
|
||||||
|
##
|
||||||
|
## ## Before you start
|
||||||
|
## The only prerequisite here is [Nim](https://nim-lang.org/), the programming language with a Python-like syntax and a performance similar to C. Detailed information can be found [here](https://nim-lang.org/docs/tut1.html).
|
||||||
|
##
|
||||||
|
## Install Nim via their [official website](https://nim-lang.org/install.html).
|
||||||
|
## Check Nim's installation via `nim --version` and its package manager Nimble via `nimble --version`.
|
||||||
|
##
|
||||||
|
## You can now install the latest version of `nim-libp2p`:
|
||||||
|
## ```bash
|
||||||
|
## nimble install libp2p@#master
|
||||||
|
## ```
|
||||||
|
##
|
||||||
|
## ## A simple ping application
|
||||||
|
## We'll start by creating a simple application, which is starting two libp2p [switch](https://docs.libp2p.io/concepts/stream-multiplexing/#switch-swarm), and pinging each other using the [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol.
|
||||||
|
##
|
||||||
|
## !!! tips ""
|
||||||
|
## You can find the source of this tutorial (and other tutorials) in the [libp2p/examples](https://github.com/status-im/nim-libp2p/tree/master/examples) folder!
|
||||||
|
##
|
||||||
|
## Let's create a `part1.nim`, and import our dependencies:
|
||||||
|
import chronos
|
||||||
|
|
||||||
|
import libp2p
|
||||||
|
import libp2p/protocols/ping
|
||||||
|
|
||||||
|
## [chronos](https://github.com/status-im/nim-chronos) the asynchronous framework used by `nim-libp2p`
|
||||||
|
##
|
||||||
|
## Next, we'll create an helper procedure to create our switches. A switch needs a bit of configuration, and it will be easier to do this configuration only once:
|
||||||
|
proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
|
||||||
|
var switch = SwitchBuilder
|
||||||
|
.new()
|
||||||
|
.withRng(rng) # Give the application RNG
|
||||||
|
.withAddress(ma) # Our local address(es)
|
||||||
|
.withTcpTransport() # Use TCP as transport
|
||||||
|
.withMplex() # Use Mplex as muxer
|
||||||
|
.withNoise() # Use Noise as secure manager
|
||||||
|
.build()
|
||||||
|
|
||||||
|
return switch
|
||||||
|
|
||||||
|
## This will create a switch using [Mplex](https://docs.libp2p.io/concepts/stream-multiplexing/) as a multiplexer, Noise to secure the communication, and TCP as an underlying transport.
|
||||||
|
##
|
||||||
|
## You can of course tweak this, to use a different or multiple transport, or tweak the configuration of Mplex and Noise, but this is some sane defaults that we'll use going forward.
|
||||||
|
##
|
||||||
|
##
|
||||||
|
## Let's now start to create our main procedure:
|
||||||
|
proc main() {.async, gcsafe.} =
|
||||||
|
let
|
||||||
|
rng = newRng()
|
||||||
|
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||||
|
pingProtocol = Ping.new(rng=rng)
|
||||||
|
## We created some variables that we'll need for the rest of the application: the global `rng` instance, our `localAddress`, and an instance of the `Ping` protocol.
|
||||||
|
## The address is in the [MultiAddress](https://github.com/multiformats/multiaddr) format. The port `0` means "take any port available".
|
||||||
|
##
|
||||||
|
## `tryGet` is procedure which is part of [nim-result](https://github.com/arnetheduck/nim-result/), that will throw an exception if the supplied MultiAddress is invalid.
|
||||||
|
##
|
||||||
|
## We can now create our two switches:
|
||||||
|
let
|
||||||
|
switch1 = createSwitch(localAddress, rng)
|
||||||
|
switch2 = createSwitch(localAddress, rng)
|
||||||
|
|
||||||
|
switch1.mount(pingProtocol)
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
## We've **mounted** the `pingProtocol` on our first switch. This means that the first switch will actually listen for any ping requests coming in, and handle them accordingly.
|
||||||
|
##
|
||||||
|
## Now that we've started the nodes, they are listening for incoming peers.
|
||||||
|
## We can find out which port was attributed, and the resulting local addresses, by using `switch1.peerInfo.addrs`.
|
||||||
|
##
|
||||||
|
## We'll **dial** the first switch from the second one, by specifying it's **Peer ID**, it's **MultiAddress** and the **`Ping` protocol codec**:
|
||||||
|
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, PingCodec)
|
||||||
|
## We now have a `Ping` connection setup between the second and the first switch, we can use it to actually ping the node:
|
||||||
|
# ping the other node and echo the ping duration
|
||||||
|
echo "ping: ", await pingProtocol.ping(conn)
|
||||||
|
|
||||||
|
# We must close the connection ourselves when we're done with it
|
||||||
|
await conn.close()
|
||||||
|
## And that's it! Just a little bit of cleanup: shutting down the switches, waiting for them to stop, and we'll call our `main` procedure:
|
||||||
|
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
|
||||||
|
|
||||||
|
waitFor(main())
|
||||||
|
|
||||||
|
## You can now run this program using `nim c -r part1.nim`, and you should see the dialing sequence, ending with a ping output.
|
||||||
|
##
|
||||||
|
## In the [next tutorial](tutorial_2_customproto.md), we'll look at how to create our own custom protocol.
|
@ -1,82 +0,0 @@
|
|||||||
# Custom protocol in libp2p
|
|
||||||
|
|
||||||
In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.
|
|
||||||
|
|
||||||
We'll now look at how to create a custom protocol inside the libp2p
|
|
||||||
|
|
||||||
Let's create a `part2.nim`, and import our dependencies:
|
|
||||||
```nim
|
|
||||||
import chronos
|
|
||||||
import stew/byteutils
|
|
||||||
|
|
||||||
import libp2p
|
|
||||||
```
|
|
||||||
This is similar to the first tutorial, except we don't need to import the `Ping` protocol.
|
|
||||||
|
|
||||||
Next, we'll declare our custom protocol
|
|
||||||
```nim
|
|
||||||
const TestCodec = "/test/proto/1.0.0"
|
|
||||||
|
|
||||||
type TestProto = ref object of LPProtocol
|
|
||||||
```
|
|
||||||
|
|
||||||
We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
|
|
||||||
|
|
||||||
A protocol generally has two parts: a handling/server part, and a dialing/client part.
|
|
||||||
These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
|
|
||||||
|
|
||||||
Let's start with the server part:
|
|
||||||
```nim
|
|
||||||
proc new(T: typedesc[TestProto]): T =
|
|
||||||
# every incoming connections will in be handled in this closure
|
|
||||||
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
|
||||||
# Read up to 1024 bytes from this connection, and transform them into
|
|
||||||
# a string
|
|
||||||
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
|
|
||||||
# We must close the connections ourselves when we're done with it
|
|
||||||
await conn.close()
|
|
||||||
|
|
||||||
return T(codecs: @[TestCodec], handler: handle)
|
|
||||||
```
|
|
||||||
This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
|
|
||||||
In our handle, we simply read a message from the connection and `echo` it.
|
|
||||||
|
|
||||||
We can now create our client part:
|
|
||||||
```nim
|
|
||||||
proc hello(p: TestProto, conn: Connection) {.async.} =
|
|
||||||
await conn.writeLp("Hello p2p!")
|
|
||||||
```
|
|
||||||
Again, pretty straight-forward, we just send a message on the connection.
|
|
||||||
|
|
||||||
We can now create our main procedure:
|
|
||||||
```nim
|
|
||||||
proc main() {.async, gcsafe.} =
|
|
||||||
let
|
|
||||||
rng = newRng()
|
|
||||||
testProto = TestProto.new()
|
|
||||||
switch1 = newStandardSwitch(rng=rng)
|
|
||||||
switch2 = newStandardSwitch(rng=rng)
|
|
||||||
|
|
||||||
switch1.mount(testProto)
|
|
||||||
|
|
||||||
await switch1.start()
|
|
||||||
await switch2.start()
|
|
||||||
|
|
||||||
let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)
|
|
||||||
|
|
||||||
await testProto.hello(conn)
|
|
||||||
|
|
||||||
# We must close the connection ourselves when we're done with it
|
|
||||||
await conn.close()
|
|
||||||
|
|
||||||
await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
|
|
||||||
```
|
|
||||||
|
|
||||||
This is very similar to the first tutorial's `main`, the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the `createSwitch` of the first tutorial, but is bundled directly in libp2p
|
|
||||||
|
|
||||||
We can now wrap our program by calling our main proc:
|
|
||||||
```nim
|
|
||||||
waitFor(main())
|
|
||||||
```
|
|
||||||
|
|
||||||
And that's it!
|
|
74
examples/tutorial_2_customproto.nim
Normal file
74
examples/tutorial_2_customproto.nim
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
## # Custom protocol in libp2p
|
||||||
|
##
|
||||||
|
## In the [previous tutorial](tutorial_1_connect.md), we've looked at how to create a simple ping program using the `nim-libp2p`.
|
||||||
|
##
|
||||||
|
## We'll now look at how to create a custom protocol inside the libp2p
|
||||||
|
##
|
||||||
|
## Let's create a `part2.nim`, and import our dependencies:
|
||||||
|
import chronos
|
||||||
|
import stew/byteutils
|
||||||
|
|
||||||
|
import libp2p
|
||||||
|
## This is similar to the first tutorial, except we don't need to import the `Ping` protocol.
|
||||||
|
##
|
||||||
|
## Next, we'll declare our custom protocol
|
||||||
|
const TestCodec = "/test/proto/1.0.0"
|
||||||
|
|
||||||
|
type TestProto = ref object of LPProtocol
|
||||||
|
|
||||||
|
## We've set a [protocol ID](https://docs.libp2p.io/concepts/protocols/#protocol-ids), and created a custom `LPProtocol`. In a more complex protocol, we could use this structure to store interesting variables.
|
||||||
|
##
|
||||||
|
## A protocol generally has two parts: a handling/server part, and a dialing/client part.
|
||||||
|
## These two parts can be identical, but in our trivial protocol, the server will wait for a message from the client, and the client will send a message, so we have to handle the two cases separately.
|
||||||
|
##
|
||||||
|
## Let's start with the server part:
|
||||||
|
|
||||||
|
proc new(T: typedesc[TestProto]): T =
  ## Constructor for TestProto: registers our codec and the server-side
  ## handler that is invoked for each incoming peer asking for this protocol.
  # Every incoming connection will be handled in this closure
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    # Read up to 1024 bytes from this connection, and transform them into
    # a string
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    # We must close the connections ourselves when we're done with it
    await conn.close()

  return T.new(codecs = @[TestCodec], handler = handle)
|
||||||
|
|
||||||
|
## This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
|
||||||
|
## In our handle, we simply read a message from the connection and `echo` it.
|
||||||
|
##
|
||||||
|
## We can now create our client part:
|
||||||
|
proc hello(p: TestProto, conn: Connection) {.async.} =
  ## Client side of the protocol: send a single greeting over `conn`.
  await conn.writeLp("Hello p2p!")
|
||||||
|
|
||||||
|
## Again, pretty straight-forward, we just send a message on the connection.
|
||||||
|
##
|
||||||
|
## We can now create our main procedure:
|
||||||
|
proc main() {.async, gcsafe.} =
  ## Wire two switches together and exercise TestProto end-to-end.
  let
    rng = newRng()
    testProto = TestProto.new()
    switch1 = newStandardSwitch(rng=rng)
    switch2 = newStandardSwitch(rng=rng)

  # Only switch1 serves the protocol; switch2 acts as the client.
  switch1.mount(testProto)

  await switch1.start()
  await switch2.start()

  let conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, TestCodec)

  await testProto.hello(conn)

  # We must close the connection ourselves when we're done with it
  await conn.close()

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
|
||||||
|
|
||||||
|
## This is very similar to the first tutorial's `main`, the only noteworthy difference is that we use `newStandardSwitch`, which is similar to the `createSwitch` of the first tutorial, but is bundled directly in libp2p
|
||||||
|
##
|
||||||
|
## We can now wrap our program by calling our main proc:
|
||||||
|
waitFor(main())
|
||||||
|
|
||||||
|
## And that's it!
|
||||||
|
## In the [next tutorial](tutorial_3_protobuf.md), we'll create a more complex protocol using Protobuf.
|
162
examples/tutorial_3_protobuf.nim
Normal file
162
examples/tutorial_3_protobuf.nim
Normal file
@ -0,0 +1,162 @@
|
|||||||
|
## # Protobuf usage
|
||||||
|
##
|
||||||
|
## In the [previous tutorial](tutorial_2_customproto.md), we created a simple "ping" protocol.
|
||||||
|
## Most real protocols want their messages to be structured and extensible, which is why
|
||||||
|
## most real protocols use [protobuf](https://developers.google.com/protocol-buffers) to
|
||||||
|
## define their message structures.
|
||||||
|
##
|
||||||
|
## Here, we'll create a slightly more complex protocol, which parses & generates protobuf
|
||||||
|
## messages. Let's start by importing our dependencies, as usual:
|
||||||
|
import chronos
|
||||||
|
import stew/results # for Opt[T]
|
||||||
|
|
||||||
|
import libp2p
|
||||||
|
|
||||||
|
## ## Protobuf encoding & decoding
|
||||||
|
## This will be the structure of our messages:
|
||||||
|
## ```protobuf
|
||||||
|
## message MetricList {
|
||||||
|
## message Metric {
|
||||||
|
## string name = 1;
|
||||||
|
## float value = 2;
|
||||||
|
## }
|
||||||
|
##
|
||||||
|
## repeated Metric metrics = 2;
|
||||||
|
## }
|
||||||
|
## ```
|
||||||
|
## We'll create our protobuf types, encoders & decoders, according to this format.
|
||||||
|
## To create the encoders & decoders, we are going to use minprotobuf
|
||||||
|
## (included in libp2p).
|
||||||
|
##
|
||||||
|
## While more modern techniques
|
||||||
|
## (such as [nim-protobuf-serialization](https://github.com/status-im/nim-protobuf-serialization))
|
||||||
|
## exist, minprotobuf is currently the recommended method to handle protobuf, since it has
|
||||||
|
## been used in production extensively, and audited.
|
||||||
|
type
|
||||||
|
Metric = object
|
||||||
|
name: string
|
||||||
|
value: float
|
||||||
|
|
||||||
|
MetricList = object
|
||||||
|
metrics: seq[Metric]
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
proc encode(m: Metric): ProtoBuffer =
  ## Serialize a Metric: field 1 holds the name, field 2 the value.
  result = initProtoBuffer()
  result.write(1, m.name)
  result.write(2, m.value)
  result.finish()
|
||||||
|
|
||||||
|
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
  ## Parse a Metric out of raw protobuf bytes.
  var metric: Metric
  let pb = initProtoBuffer(buf)
  # "getField" will return a Result[bool, ProtoError].
  # The Result will hold an error if the protobuf is invalid.
  # The Result will hold "false" if the field is missing
  #
  # We are just checking the error, and ignoring whether the value
  # is present or not (default values are valid).
  discard ? pb.getField(1, metric.name)
  discard ? pb.getField(2, metric.value)
  ok(metric)
|
||||||
|
|
||||||
|
proc encode(m: MetricList): ProtoBuffer =
  ## Serialize a MetricList: every metric goes into repeated field 1.
  result = initProtoBuffer()
  for metric in m.metrics:
    result.write(1, metric.encode())
  result.finish()
|
||||||
|
|
||||||
|
proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError] =
  ## Parse a MetricList; each repeated sub-buffer in field 1 is itself
  ## a protobuf-encoded Metric.
  var
    res: MetricList
    metrics: seq[seq[byte]]
  let pb = initProtoBuffer(buf)
  discard ? pb.getRepeatedField(1, metrics)

  for metric in metrics:
    res.metrics &= ? Metric.decode(metric)
  ok(res)
|
||||||
|
|
||||||
|
## ## Results instead of exceptions
|
||||||
|
## As you can see, this part of the program also uses Results instead of exceptions for error handling.
|
||||||
|
## We start by `{.push raises: [].}`, which will prevent every non-async function from raising
|
||||||
|
## exceptions.
|
||||||
|
##
|
||||||
|
## Then, we use [nim-result](https://github.com/arnetheduck/nim-result) to convey
|
||||||
|
## errors to function callers. A `Result[T, E]` will either hold a valid result of type
|
||||||
|
## T, or an error of type E.
|
||||||
|
##
|
||||||
|
## You can check if the call succeeded by using `res.isOk`, and then get the
|
||||||
|
## value using `res.value` or the error by using `res.error`.
|
||||||
|
##
|
||||||
|
## Another useful tool is `?`, which will unpack a Result if it succeeded,
|
||||||
|
## or if it failed, exit the current procedure returning the error.
|
||||||
|
##
|
||||||
|
## nim-result is packed with other functionalities that you'll find in the
|
||||||
|
## nim-result repository.
|
||||||
|
##
|
||||||
|
## Results and exception are generally interchangeable, but have different semantics
|
||||||
|
## that you may or may not prefer.
|
||||||
|
##
|
||||||
|
## ## Creating the protocol
|
||||||
|
## We'll next create a protocol, like in the last tutorial, to request these metrics from our host
|
||||||
|
type
|
||||||
|
MetricCallback = proc: Future[MetricList] {.raises: [], gcsafe.}
|
||||||
|
MetricProto = ref object of LPProtocol
|
||||||
|
metricGetter: MetricCallback
|
||||||
|
|
||||||
|
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
  ## Build a MetricProto that serves the metrics produced by `cb`
  ## to any peer dialing "/metric-getter/1.0.0".
  var res: MetricProto
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    # Fetch fresh metrics, protobuf-encode them, send, then close.
    let
      metrics = await res.metricGetter()
      asProtobuf = metrics.encode()
    await conn.writeLp(asProtobuf.buffer)
    await conn.close()

  res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
  res.metricGetter = cb
  return res
|
||||||
|
|
||||||
|
proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
  ## Client side: read the remote metric list over `conn` and decode it.
  let protobuf = await conn.readLp(2048)
  # tryGet will raise an exception if the Result contains an error.
  # It's useful to bridge between exception-world and result-world
  return MetricList.decode(protobuf).tryGet()
|
||||||
|
|
||||||
|
## We can now create our main procedure:
|
||||||
|
proc main() {.async, gcsafe.} =
  ## Start two switches, fetch random metrics from the first via
  ## MetricProto, and print them.
  let rng = newRng()
  # Produces between 1 and 16 metrics with pseudo-random values.
  proc randomMetricGenerator: Future[MetricList] {.async.} =
    let metricCount = rng[].generate(uint32) mod 16
    for i in 0 ..< metricCount + 1:
      result.metrics.add(Metric(
        name: "metric_" & $i,
        value: float(rng[].generate(uint16)) / 1000.0
      ))
    return result

  let
    metricProto1 = MetricProto.new(randomMetricGenerator)
    metricProto2 = MetricProto.new(randomMetricGenerator)
    switch1 = newStandardSwitch(rng=rng)
    switch2 = newStandardSwitch(rng=rng)

  switch1.mount(metricProto1)

  await switch1.start()
  await switch2.start()

  let
    conn = await switch2.dial(switch1.peerInfo.peerId, switch1.peerInfo.addrs, metricProto2.codecs)
    metrics = await metricProto2.fetch(conn)
  await conn.close()

  for metric in metrics.metrics:
    echo metric.name, " = ", metric.value

  await allFutures(switch1.stop(), switch2.stop()) # close connections and shutdown all transports
|
||||||
|
|
||||||
|
waitFor(main())
|
||||||
|
|
||||||
|
## If you run this program, you should see random metrics being sent from the switch1 to the switch2.
|
163
examples/tutorial_4_gossipsub.nim
Normal file
163
examples/tutorial_4_gossipsub.nim
Normal file
@ -0,0 +1,163 @@
|
|||||||
|
## # GossipSub
|
||||||
|
##
|
||||||
|
## In this tutorial, we'll build a simple GossipSub network
|
||||||
|
## to broadcast the metrics we built in the previous tutorial.
|
||||||
|
##
|
||||||
|
## GossipSub is used to broadcast some messages in a network,
|
||||||
|
## and allows to balance between latency, bandwidth usage,
|
||||||
|
## privacy and attack resistance.
|
||||||
|
##
|
||||||
|
## You'll find a good explanation on how GossipSub works
|
||||||
|
## [here.](https://docs.libp2p.io/concepts/publish-subscribe/) There are a lot
|
||||||
|
## of parameters you can tweak to adjust how GossipSub behaves but here we'll
|
||||||
|
## use the sane defaults shipped with libp2p.
|
||||||
|
##
|
||||||
|
## We'll start by creating our metric structure like previously
|
||||||
|
|
||||||
|
import chronos
|
||||||
|
import stew/results
|
||||||
|
|
||||||
|
import libp2p
|
||||||
|
import libp2p/protocols/pubsub/rpc/messages
|
||||||
|
|
||||||
|
type
|
||||||
|
Metric = object
|
||||||
|
name: string
|
||||||
|
value: float
|
||||||
|
|
||||||
|
MetricList = object
|
||||||
|
hostname: string
|
||||||
|
metrics: seq[Metric]
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
proc encode(m: Metric): ProtoBuffer =
  ## Protobuf-encode one Metric (1: name, 2: value).
  result = initProtoBuffer()
  result.write(1, m.name)
  result.write(2, m.value)
  result.finish()
|
||||||
|
|
||||||
|
proc decode(_: type Metric, buf: seq[byte]): Result[Metric, ProtoError] =
  ## Parse a Metric; missing fields keep their defaults, only
  ## malformed protobuf propagates an error.
  var metric: Metric
  let pb = initProtoBuffer(buf)
  discard ? pb.getField(1, metric.name)
  discard ? pb.getField(2, metric.value)
  ok(metric)
|
||||||
|
|
||||||
|
proc encode(m: MetricList): ProtoBuffer =
  ## Protobuf-encode a MetricList: metrics in repeated field 1,
  ## hostname in field 2.
  result = initProtoBuffer()
  for metric in m.metrics:
    result.write(1, metric.encode())
  result.write(2, m.hostname)
  result.finish()
|
||||||
|
|
||||||
|
proc decode(_: type MetricList, buf: seq[byte]): Result[MetricList, ProtoError] =
  ## Parse a MetricList; the hostname (field 2) is mandatory, the
  ## metrics (repeated field 1) are decoded one by one.
  var
    res: MetricList
    metrics: seq[seq[byte]]
  let pb = initProtoBuffer(buf)
  discard ? pb.getRepeatedField(1, metrics)

  for metric in metrics:
    res.metrics &= ? Metric.decode(metric)
  ? pb.getRequiredField(2, res.hostname)
  ok(res)
|
||||||
|
|
||||||
|
## This is exactly like the previous structure, except that we added
|
||||||
|
## a `hostname` to distinguish where the metric is coming from.
|
||||||
|
##
|
||||||
|
## Now we'll create a small GossipSub network to broadcast the metrics,
|
||||||
|
## and collect them on one of the node.
|
||||||
|
|
||||||
|
type Node = tuple[switch: Switch, gossip: GossipSub, hostname: string]
|
||||||
|
|
||||||
|
proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
  ## Drives one node of the network: validate incoming metrics,
  ## subscribe to the "metrics" topic, then broadcast random metrics.
  node.gossip.addValidator(["metrics"],
    proc(topic: string, message: Message): Future[ValidationResult] {.async.} =
      let decoded = MetricList.decode(message.data)
      if decoded.isErr: return ValidationResult.Reject
      return ValidationResult.Accept
  )
  # This "validator" will attach to the `metrics` topic and make sure
  # that every message in this topic is valid. This allows us to stop
  # propagation of invalid messages quickly in the network, and punish
  # peers sending them.

  # `John` will be responsible to log the metrics, the rest of the nodes
  # will just forward them in the network
  if node.hostname == "John":
    node.gossip.subscribe("metrics",
      proc (topic: string, data: seq[byte]) {.async.} =
        echo MetricList.decode(data).tryGet()
    )
  else:
    node.gossip.subscribe("metrics", nil)

  # Create random metrics 10 times and broadcast them
  for _ in 0..<10:
    await sleepAsync(500.milliseconds)
    var metricList = MetricList(hostname: node.hostname)
    let metricCount = rng[].generate(uint32) mod 4
    for i in 0 ..< metricCount + 1:
      metricList.metrics.add(Metric(
        name: "metric_" & $i,
        value: float(rng[].generate(uint16)) / 1000.0
      ))

    discard await node.gossip.publish("metrics", encode(metricList).buffer)
  await node.switch.stop()
|
||||||
|
|
||||||
|
## For our main procedure, we'll create a few nodes, and connect them together.
|
||||||
|
## Note that they are not all interconnected, but GossipSub will take care of
|
||||||
|
## broadcasting to the full network nonetheless.
|
||||||
|
proc main {.async.} =
  ## Create five gossipsub nodes, connect each to a few neighbors,
  ## and run them concurrently until all have broadcast their metrics.
  let rng = newRng()
  var nodes: seq[Node]

  for hostname in ["John", "Walter", "David", "Thuy", "Amy"]:
    let
      switch = newStandardSwitch(rng=rng)
      gossip = GossipSub.init(switch = switch, triggerSelf = true)
    switch.mount(gossip)
    await switch.start()

    nodes.add((switch, gossip, hostname))

  for index, node in nodes:
    # Connect to a few neighbors
    for otherNodeIdx in index - 1 .. index + 2:
      if otherNodeIdx notin 0 ..< nodes.len or otherNodeIdx == index: continue
      let otherNode = nodes[otherNodeIdx]
      await node.switch.connect(
        otherNode.switch.peerInfo.peerId,
        otherNode.switch.peerInfo.addrs)

  var allFuts: seq[Future[void]]
  for node in nodes:
    allFuts.add(oneNode(node, rng))

  await allFutures(allFuts)
|
||||||
|
|
||||||
|
waitFor(main())
|
||||||
|
|
||||||
|
## If you run this program, you should see something like:
|
||||||
|
## ```
|
||||||
|
## (hostname: "John", metrics: @[(name: "metric_0", value: 42.097), (name: "metric_1", value: 50.99), (name: "metric_2", value: 47.86), (name: "metric_3", value: 5.368)])
|
||||||
|
## (hostname: "Walter", metrics: @[(name: "metric_0", value: 39.452), (name: "metric_1", value: 15.606), (name: "metric_2", value: 14.059), (name: "metric_3", value: 6.68)])
|
||||||
|
## (hostname: "David", metrics: @[(name: "metric_0", value: 9.82), (name: "metric_1", value: 2.862), (name: "metric_2", value: 15.514)])
|
||||||
|
## (hostname: "Thuy", metrics: @[(name: "metric_0", value: 59.038)])
|
||||||
|
## (hostname: "Amy", metrics: @[(name: "metric_0", value: 55.616), (name: "metric_1", value: 23.52), (name: "metric_2", value: 59.081), (name: "metric_3", value: 2.516)])
|
||||||
|
## ```
|
||||||
|
##
|
||||||
|
## This is John receiving & logging everyone's metrics.
|
||||||
|
##
|
||||||
|
## ## Going further
|
||||||
|
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
|
||||||
|
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
|
||||||
|
## you can achieve very different properties.
|
||||||
|
##
|
||||||
|
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)
|
||||||
|
##
|
||||||
|
## If you are interested in broadcasting for your application, you may want to use [Waku](https://waku.org/), which builds on top of GossipSub,
|
||||||
|
## and adds features such as history, spam protection, and light node friendliness.
|
132
examples/tutorial_5_discovery.nim
Normal file
132
examples/tutorial_5_discovery.nim
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
## # Discovery Manager
|
||||||
|
##
|
||||||
|
## In the [previous tutorial](tutorial_4_gossipsub.md), we built a custom protocol using [protobuf](https://developers.google.com/protocol-buffers) and
|
||||||
|
## spread information (some metrics) on the network using gossipsub.
|
||||||
|
## For this tutorial, on the other hand, we'll go back on a simple example
|
||||||
|
## we'll try to discover specific peers to greet on the network.
|
||||||
|
##
|
||||||
|
## First, as usual, we import the dependencies:
|
||||||
|
import sequtils
|
||||||
|
import chronos
|
||||||
|
import stew/byteutils
|
||||||
|
|
||||||
|
import libp2p
|
||||||
|
import libp2p/protocols/rendezvous
|
||||||
|
import libp2p/discovery/rendezvousinterface
|
||||||
|
import libp2p/discovery/discoverymngr
|
||||||
|
|
||||||
|
## We'll not use newStandardSwitch this time as we need the discovery protocol
|
||||||
|
## [RendezVous](https://github.com/libp2p/specs/blob/master/rendezvous/README.md) to be mounted on the switch using withRendezVous.
|
||||||
|
##
|
||||||
|
## Note that other discovery methods such as [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) or [discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md) exist.
|
||||||
|
proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
  ## Build a switch with TCP transport, Yamux muxing, Noise encryption,
  ## and the RendezVous discovery protocol mounted.
  SwitchBuilder.new()
    .withRng(newRng())
    .withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
    .withTcpTransport()
    .withYamux()
    .withNoise()
    .withRendezVous(rdv)
    .build()
|
||||||
|
|
||||||
|
# Create a really simple protocol to log one message received then close the stream
|
||||||
|
const DumbCodec = "/dumb/proto/1.0.0"
|
||||||
|
type DumbProto = ref object of LPProtocol
|
||||||
|
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
  ## Trivial protocol: log one received message, then close the stream.
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
    await conn.close()
  return T.new(codecs = @[DumbCodec], handler = handle)
|
||||||
|
|
||||||
|
## ## Bootnodes
|
||||||
|
## The first time a p2p program is run, it needs to know how to join
|
||||||
|
## its network. This is generally done by hard-coding a list of stable
|
||||||
|
## nodes in the binary, called "bootnodes". These bootnodes are a
|
||||||
|
## critical part of a p2p network, since they are used by every new
|
||||||
|
## user to onboard the network.
|
||||||
|
##
|
||||||
|
## By using libp2p, we can use any node supporting our discovery protocol
|
||||||
|
## (rendezvous in this case) as a bootnode. For this example, we'll
|
||||||
|
## create a bootnode, and then every peer will advertise itself on the
|
||||||
|
## bootnode, and use it to find other peers
|
||||||
|
proc main() {.async, gcsafe.} =
  ## Start a bootnode and six peers, have them advertise via RendezVous,
  ## then let a newcomer discover and "greet" them.
  let bootNode = createSwitch()
  await bootNode.start()

  # Create 5 nodes in the network
  var
    switches: seq[Switch] = @[]
    discManagers: seq[DiscoveryManager] = @[]

  for i in 0..5:
    let rdv = RendezVous.new()
    # Create a remote future to await at the end of the program
    let switch = createSwitch(rdv)
    switch.mount(DumbProto.new(i))
    switches.add(switch)

    # A discovery manager is a simple tool, you can set it up by adding discovery
    # interfaces (such as RendezVousInterface) then you can use it to advertise
    # something on the network or to request something from it.
    let dm = DiscoveryManager()
    # A RendezVousInterface is a RendezVous protocol wrapped to be usable by the
    # DiscoveryManager.
    dm.add(RendezVousInterface.new(rdv))
    discManagers.add(dm)

    # We can now start the switch and connect to the bootnode
    await switch.start()
    await switch.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs)

    # Each node of the network will advertise on some topics (EvenGang or OddClub)
    dm.advertise(RdvNamespace(if i mod 2 == 0: "EvenGang" else: "OddClub"))

  ## We can now create the newcomer. This peer will connect to the boot node, and use
  ## it to discover peers & greet them.
  let
    rdv = RendezVous.new()
    newcomer = createSwitch(rdv)
    dm = DiscoveryManager()
  await newcomer.start()
  await newcomer.connect(bootNode.peerInfo.peerId, bootNode.peerInfo.addrs)
  dm.add(RendezVousInterface.new(rdv, ttr = 250.milliseconds))

  # Use the discovery manager to find peers on the OddClub topic to greet them
  let queryOddClub = dm.request(RdvNamespace("OddClub"))
  for _ in 0..2:
    let
      # getPeer gives you a PeerAttribute containing information about the peer.
      res = await queryOddClub.getPeer()
      # Here we will use the PeerId and the MultiAddress to greet him
      conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)
    await conn.writeLp("Odd Club suuuucks! Even Gang is better!")
    # Uh-oh!
    await conn.close()
    # Wait for the peer to close the stream
    await conn.join()
  # Queries will run in a loop, so we must stop them when we are done
  queryOddClub.stop()

  # Maybe it was because he wanted to join the EvenGang
  let queryEvenGang = dm.request(RdvNamespace("EvenGang"))
  for _ in 0..2:
    let
      res = await queryEvenGang.getPeer()
      conn = await newcomer.dial(res[PeerId], res.getAll(MultiAddress), DumbCodec)
    await conn.writeLp("Even Gang is sooo laaame! Odd Club rocks!")
    # Or maybe not...
    await conn.close()
    await conn.join()
  queryEvenGang.stop()
  # What can I say, some people just want to watch the world burn... Anyway

  # Stop all the discovery managers
  for d in discManagers:
    d.stop()
  dm.stop()

  # Stop all the switches
  await allFutures(switches.mapIt(it.stop()))
  await allFutures(bootNode.stop(), newcomer.stop())
|
||||||
|
|
||||||
|
waitFor(main())
|
259
examples/tutorial_6_game.nim
Normal file
259
examples/tutorial_6_game.nim
Normal file
@ -0,0 +1,259 @@
|
|||||||
|
## # Tron example
|
||||||
|
##
|
||||||
|
## In this tutorial, we will create a video game based on libp2p, using
|
||||||
|
## all of the features we talked about in the last tutorials.
|
||||||
|
##
|
||||||
|
## We will:
|
||||||
|
## - Discover peers using the Discovery Manager
|
||||||
|
## - Use GossipSub to find a play mate
|
||||||
|
## - Create a custom protocol to play with him
|
||||||
|
##
|
||||||
|
## While this may look like a daunting project, it's less than 150 lines of code.
|
||||||
|
##
|
||||||
|
## The game will be a simple Tron. We will use [nico](https://github.com/ftsf/nico)
|
||||||
|
## as a game engine. (you need to run `nimble install nico` to have it available)
|
||||||
|
##
|
||||||
|
## 
|
||||||
|
##
|
||||||
|
## We will start by importing our dependencies and creating our types
|
||||||
|
import os
|
||||||
|
import nico, chronos, stew/byteutils, stew/endians2
|
||||||
|
import libp2p
|
||||||
|
import libp2p/protocols/rendezvous
|
||||||
|
import libp2p/discovery/rendezvousinterface
|
||||||
|
import libp2p/discovery/discoverymngr
|
||||||
|
|
||||||
|
const
|
||||||
|
directions = @[(K_UP, 0, -1), (K_LEFT, -1, 0), (K_DOWN, 0, 1), (K_RIGHT, 1, 0)]
|
||||||
|
mapSize = 32
|
||||||
|
tickPeriod = 0.2
|
||||||
|
|
||||||
|
type
|
||||||
|
Player = ref object
|
||||||
|
x, y: int
|
||||||
|
currentDir, nextDir: int
|
||||||
|
lost: bool
|
||||||
|
color: int
|
||||||
|
|
||||||
|
Game = ref object
|
||||||
|
gameMap: array[mapSize * mapSize, int]
|
||||||
|
tickTime: float
|
||||||
|
localPlayer, remotePlayer: Player
|
||||||
|
peerFound: Future[Connection]
|
||||||
|
hasCandidate: bool
|
||||||
|
tickFinished: Future[int]
|
||||||
|
|
||||||
|
GameProto = ref object of LPProtocol
|
||||||
|
|
||||||
|
proc new(_: type[Game]): Game =
|
||||||
|
# Default state of a game
|
||||||
|
result = Game(
|
||||||
|
tickTime: -3.0, # 3 seconds of "warm-up" time
|
||||||
|
localPlayer: Player(x: 4, y: 16, currentDir: 3, nextDir: 3, color: 8),
|
||||||
|
remotePlayer: Player(x: 27, y: 16, currentDir: 1, nextDir: 1, color: 12),
|
||||||
|
peerFound: newFuture[Connection]()
|
||||||
|
)
|
||||||
|
for pos in 0 .. result.gameMap.high:
|
||||||
|
if pos mod mapSize in [0, mapSize - 1] or pos div mapSize in [0, mapSize - 1]:
|
||||||
|
result.gameMap[pos] = 7
|
||||||
|
|
||||||
|
## ## Game Logic
|
||||||
|
## The networking during the game will work like this:
|
||||||
|
##
|
||||||
|
## * Each player will have `tickPeriod` (0.1) seconds to choose
|
||||||
|
## a direction that he wants to go to (default to current direction)
|
||||||
|
## * After `tickPeriod`, we will send our choosen direction to the peer,
|
||||||
|
## and wait for his direction
|
||||||
|
## * Once we have both direction, we will "tick" the game, and restart the
|
||||||
|
## loop, as long as both player are alive.
|
||||||
|
##
|
||||||
|
## This is a very simplistic scheme, but creating proper networking for
|
||||||
|
## video games is an [art](https://developer.valvesoftware.com/wiki/Latency_Compensating_Methods_in_Client/Server_In-game_Protocol_Design_and_Optimization)
|
||||||
|
##
|
||||||
|
## The main drawback of this scheme is that the more ping you have with
|
||||||
|
## the peer, the slower the game will run. Or invertedly, the less ping you
|
||||||
|
## have, the faster it runs!
|
||||||
|
proc update(g: Game, dt: float32) =
|
||||||
|
# Will be called at each frame of the game.
|
||||||
|
#
|
||||||
|
# Because both Nico and Chronos have a main loop,
|
||||||
|
# they must share the control of the main thread.
|
||||||
|
# This is a hacky way to make this happen
|
||||||
|
waitFor(sleepAsync(1.milliseconds))
|
||||||
|
# Don't do anything if we are still waiting for an opponent
|
||||||
|
if not(g.peerFound.finished()) or isNil(g.tickFinished): return
|
||||||
|
g.tickTime += dt
|
||||||
|
|
||||||
|
# Update the wanted direction, making sure we can't go backward
|
||||||
|
for i in 0 .. directions.high:
|
||||||
|
if i != (g.localPlayer.currentDir + 2 mod 4) and keyp(directions[i][0]):
|
||||||
|
g.localPlayer.nextDir = i
|
||||||
|
|
||||||
|
if g.tickTime > tickPeriod and not g.tickFinished.finished():
|
||||||
|
# We choosen our next direction, let the networking know
|
||||||
|
g.localPlayer.currentDir = g.localPlayer.nextDir
|
||||||
|
g.tickFinished.complete(g.localPlayer.currentDir)
|
||||||
|
|
||||||
|
proc tick(g: Game, p: Player) =
|
||||||
|
# Move player and check if he lost
|
||||||
|
p.x += directions[p.currentDir][1]
|
||||||
|
p.y += directions[p.currentDir][2]
|
||||||
|
if g.gameMap[p.y * mapSize + p.x] != 0: p.lost = true
|
||||||
|
g.gameMap[p.y * mapSize + p.x] = p.color
|
||||||
|
|
||||||
|
proc mainLoop(g: Game, peer: Connection) {.async.} =
|
||||||
|
while not (g.localPlayer.lost or g.remotePlayer.lost):
|
||||||
|
if g.tickTime > 0.0:
|
||||||
|
g.tickTime = 0
|
||||||
|
g.tickFinished = newFuture[int]()
|
||||||
|
|
||||||
|
# Wait for a choosen direction
|
||||||
|
let dir = await g.tickFinished
|
||||||
|
# Send it
|
||||||
|
await peer.writeLp(toBytes(uint32(dir)))
|
||||||
|
|
||||||
|
# Get the one from the peer
|
||||||
|
g.remotePlayer.currentDir = int uint32.fromBytes(await peer.readLp(8))
|
||||||
|
# Tick the players & restart
|
||||||
|
g.tick(g.remotePlayer)
|
||||||
|
g.tick(g.localPlayer)
|
||||||
|
|
||||||
|
## We'll draw the map & put some texts when necessary:
|
||||||
|
proc draw(g: Game) =
|
||||||
|
for pos, color in g.gameMap:
|
||||||
|
setColor(color)
|
||||||
|
boxFill(pos mod 32 * 4, pos div 32 * 4, 4, 4)
|
||||||
|
let text = if not(g.peerFound.finished()): "Matchmaking.."
|
||||||
|
elif g.tickTime < -1.5: "Welcome to Etron"
|
||||||
|
elif g.tickTime < 0.0: "- " & $(int(abs(g.tickTime) / 0.5) + 1) & " -"
|
||||||
|
elif g.remotePlayer.lost and g.localPlayer.lost: "DEUCE"
|
||||||
|
elif g.localPlayer.lost: "YOU LOOSE"
|
||||||
|
elif g.remotePlayer.lost: "YOU WON"
|
||||||
|
else: ""
|
||||||
|
printc(text, screenWidth div 2, screenHeight div 2)
|
||||||
|
|
||||||
|
|
||||||
|
## ## Matchmaking
|
||||||
|
## To find an opponent, we will broadcast our address on a
|
||||||
|
## GossipSub topic, and wait for someone to connect to us.
|
||||||
|
## We will also listen to that topic, and connect to anyone
|
||||||
|
## broadcasting his address.
|
||||||
|
##
|
||||||
|
## If we are looking for a game, we'll send `ok` to let the
|
||||||
|
## peer know that we are available, check that he is also available,
|
||||||
|
## and launch the game.
|
||||||
|
proc new(T: typedesc[GameProto], g: Game): T =
|
||||||
|
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||||
|
defer: await conn.closeWithEof()
|
||||||
|
if g.peerFound.finished or g.hasCandidate:
|
||||||
|
await conn.close()
|
||||||
|
return
|
||||||
|
g.hasCandidate = true
|
||||||
|
await conn.writeLp("ok")
|
||||||
|
if "ok" != string.fromBytes(await conn.readLp(1024)):
|
||||||
|
g.hasCandidate = false
|
||||||
|
return
|
||||||
|
g.peerFound.complete(conn)
|
||||||
|
# The handler of a protocol must wait for the stream to
|
||||||
|
# be finished before returning
|
||||||
|
await conn.join()
|
||||||
|
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
|
||||||
|
|
||||||
|
proc networking(g: Game) {.async.} =
|
||||||
|
# Create our switch, similar to the GossipSub example and
|
||||||
|
# the Discovery examples combined
|
||||||
|
let
|
||||||
|
rdv = RendezVous.new()
|
||||||
|
switch = SwitchBuilder.new()
|
||||||
|
.withRng(newRng())
|
||||||
|
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||||
|
.withTcpTransport()
|
||||||
|
.withYamux()
|
||||||
|
.withNoise()
|
||||||
|
.withRendezVous(rdv)
|
||||||
|
.build()
|
||||||
|
dm = DiscoveryManager()
|
||||||
|
gameProto = GameProto.new(g)
|
||||||
|
gossip = GossipSub.init(
|
||||||
|
switch = switch,
|
||||||
|
triggerSelf = false)
|
||||||
|
dm.add(RendezVousInterface.new(rdv))
|
||||||
|
|
||||||
|
switch.mount(gossip)
|
||||||
|
switch.mount(gameProto)
|
||||||
|
|
||||||
|
gossip.subscribe(
|
||||||
|
"/tron/matchmaking",
|
||||||
|
proc (topic: string, data: seq[byte]) {.async.} =
|
||||||
|
# If we are still looking for an opponent,
|
||||||
|
# try to match anyone broadcasting it's address
|
||||||
|
if g.peerFound.finished or g.hasCandidate: return
|
||||||
|
g.hasCandidate = true
|
||||||
|
|
||||||
|
try:
|
||||||
|
let
|
||||||
|
(peerId, multiAddress) = parseFullAddress(data).tryGet()
|
||||||
|
stream = await switch.dial(peerId, @[multiAddress], gameProto.codec)
|
||||||
|
|
||||||
|
await stream.writeLp("ok")
|
||||||
|
if (await stream.readLp(10)) != "ok".toBytes:
|
||||||
|
g.hasCandidate = false
|
||||||
|
return
|
||||||
|
g.peerFound.complete(stream)
|
||||||
|
# We are "player 2"
|
||||||
|
swap(g.localPlayer, g.remotePlayer)
|
||||||
|
except CatchableError as exc:
|
||||||
|
discard
|
||||||
|
)
|
||||||
|
|
||||||
|
await switch.start()
|
||||||
|
defer: await switch.stop()
|
||||||
|
|
||||||
|
# As explained in the last tutorial, we need a bootnode to be able
|
||||||
|
# to find peers. We could use any libp2p running rendezvous (or any
|
||||||
|
# node running tron). We will take it's MultiAddress from the command
|
||||||
|
# line parameters
|
||||||
|
if paramCount() > 0:
|
||||||
|
let (peerId, multiAddress) = paramStr(1).parseFullAddress().tryGet()
|
||||||
|
await switch.connect(peerId, @[multiAddress])
|
||||||
|
else:
|
||||||
|
echo "No bootnode provided, listening on: ", switch.peerInfo.fullAddrs.tryGet()
|
||||||
|
|
||||||
|
# Discover peers from the bootnode, and connect to them
|
||||||
|
dm.advertise(RdvNamespace("tron"))
|
||||||
|
let discoveryQuery = dm.request(RdvNamespace("tron"))
|
||||||
|
discoveryQuery.forEach:
|
||||||
|
try:
|
||||||
|
await switch.connect(peer[PeerId], peer.getAll(MultiAddress))
|
||||||
|
except CatchableError as exc:
|
||||||
|
echo "Failed to dial a peer: ", exc.msg
|
||||||
|
|
||||||
|
# We will try to publish our address multiple times, in case
|
||||||
|
# it takes time to establish connections with other GossipSub peers
|
||||||
|
var published = false
|
||||||
|
while not published:
|
||||||
|
await sleepAsync(500.milliseconds)
|
||||||
|
for fullAddr in switch.peerInfo.fullAddrs.tryGet():
|
||||||
|
if (await gossip.publish("/tron/matchmaking", fullAddr.bytes)) == 0:
|
||||||
|
published = false
|
||||||
|
break
|
||||||
|
published = true
|
||||||
|
|
||||||
|
discoveryQuery.stop()
|
||||||
|
|
||||||
|
# We now wait for someone to connect to us (or for us to connect to someone)
|
||||||
|
let peerConn = await g.peerFound
|
||||||
|
defer: await peerConn.closeWithEof()
|
||||||
|
|
||||||
|
await g.mainLoop(peerConn)
|
||||||
|
|
||||||
|
let
|
||||||
|
game = Game.new()
|
||||||
|
netFut = networking(game)
|
||||||
|
nico.init("Status", "Tron")
|
||||||
|
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
|
||||||
|
nico.run(proc = discard, proc(dt: float32) = game.update(dt), proc = game.draw())
|
||||||
|
waitFor(netFut.cancelAndWait())
|
||||||
|
|
||||||
|
## And that's it! If you want to run this code locally, the simplest way is to use the
|
||||||
|
## first node as a boot node for the second one. But you can also use any rendezvous node
|
@ -17,7 +17,7 @@ when defined(nimdoc):
|
|||||||
## stay backward compatible during the Major version, whereas private ones can
|
## stay backward compatible during the Major version, whereas private ones can
|
||||||
## change at each new Minor version.
|
## change at each new Minor version.
|
||||||
##
|
##
|
||||||
## If you're new to nim-libp2p, you can find a tutorial `here<https://github.com/status-im/nim-libp2p/blob/master/examples/tutorial_1_connect.md>`_
|
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
|
||||||
## that can help you get started.
|
## that can help you get started.
|
||||||
|
|
||||||
# Import stuff for doc
|
# Import stuff for doc
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
mode = ScriptMode.Verbose
|
mode = ScriptMode.Verbose
|
||||||
|
|
||||||
packageName = "libp2p"
|
packageName = "libp2p"
|
||||||
version = "0.0.2"
|
version = "1.0.0"
|
||||||
author = "Status Research & Development GmbH"
|
author = "Status Research & Development GmbH"
|
||||||
description = "LibP2P implementation"
|
description = "LibP2P implementation"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
@ -9,7 +9,7 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
|
|||||||
|
|
||||||
requires "nim >= 1.2.0",
|
requires "nim >= 1.2.0",
|
||||||
"nimcrypto >= 0.4.1",
|
"nimcrypto >= 0.4.1",
|
||||||
"dnsclient >= 0.1.2",
|
"dnsclient >= 0.3.0 & < 0.4.0",
|
||||||
"bearssl >= 0.1.4",
|
"bearssl >= 0.1.4",
|
||||||
"chronicles >= 0.10.2",
|
"chronicles >= 0.10.2",
|
||||||
"chronos >= 3.0.6",
|
"chronos >= 3.0.6",
|
||||||
@ -17,46 +17,36 @@ requires "nim >= 1.2.0",
|
|||||||
"metrics",
|
"metrics",
|
||||||
"secp256k1",
|
"secp256k1",
|
||||||
"stew#head",
|
"stew#head",
|
||||||
"websock"
|
"websock",
|
||||||
|
"unittest2 >= 0.0.5 & < 0.1.0"
|
||||||
const styleCheckStyle =
|
|
||||||
if (NimMajor, NimMinor) < (1, 6):
|
|
||||||
"hint"
|
|
||||||
else:
|
|
||||||
"error"
|
|
||||||
|
|
||||||
const nimflags =
|
|
||||||
"--verbosity:0 --hints:off " &
|
|
||||||
"--warning[CaseTransition]:off --warning[ObservableStores]:off " &
|
|
||||||
"--warning[LockLevel]:off " &
|
|
||||||
"-d:chronosStrictException " &
|
|
||||||
"--styleCheck:usages --styleCheck:" & styleCheckStyle & " "
|
|
||||||
|
|
||||||
|
import hashes
|
||||||
proc runTest(filename: string, verify: bool = true, sign: bool = true,
|
proc runTest(filename: string, verify: bool = true, sign: bool = true,
|
||||||
moreoptions: string = "") =
|
moreoptions: string = "") =
|
||||||
var excstr = "nim c --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
|
var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
|
||||||
excstr.add(" -d:chronicles_sinks=textlines[stdout],json[dynamic] -d:chronicles_log_level=TRACE ")
|
excstr.add(" -d:chronicles_sinks=textlines[stdout],json[dynamic] -d:chronicles_log_level=TRACE ")
|
||||||
excstr.add(" -d:chronicles_runtime_filtering=TRUE ")
|
excstr.add(" -d:chronicles_runtime_filtering=TRUE ")
|
||||||
excstr.add(" " & getEnv("NIMFLAGS") & " ")
|
excstr.add(" " & getEnv("NIMFLAGS") & " ")
|
||||||
excstr.add(" " & nimflags & " ")
|
excstr.add(" --verbosity:0 --hints:off ")
|
||||||
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
|
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
|
||||||
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
|
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
|
||||||
excstr.add(" " & moreoptions & " ")
|
excstr.add(" " & moreoptions & " ")
|
||||||
|
if getEnv("CICOV").len > 0:
|
||||||
|
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
|
||||||
exec excstr & " -r " & " tests/" & filename
|
exec excstr & " -r " & " tests/" & filename
|
||||||
rmFile "tests/" & filename.toExe
|
rmFile "tests/" & filename.toExe
|
||||||
|
|
||||||
proc buildSample(filename: string, run = false) =
|
proc buildSample(filename: string, run = false, extraFlags = "") =
|
||||||
var excstr = "nim c --opt:speed --threads:on -d:debug "
|
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
|
||||||
excstr.add(" " & nimflags & " ")
|
|
||||||
excstr.add(" examples/" & filename)
|
excstr.add(" examples/" & filename)
|
||||||
exec excstr
|
exec excstr
|
||||||
if run:
|
if run:
|
||||||
exec "./examples/" & filename.toExe
|
exec "./examples/" & filename.toExe
|
||||||
rmFile "examples/" & filename.toExe
|
rmFile "examples/" & filename.toExe
|
||||||
|
|
||||||
proc buildTutorial(filename: string) =
|
proc tutorialToMd(filename: string) =
|
||||||
discard gorge "cat " & filename & " | nim c -r --hints:off tools/markdown_runner.nim | " &
|
let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
|
||||||
" nim " & nimflags & " c -"
|
writeFile(filename.replace(".nim", ".md"), markdown)
|
||||||
|
|
||||||
task testnative, "Runs libp2p native tests":
|
task testnative, "Runs libp2p native tests":
|
||||||
runTest("testnative")
|
runTest("testnative")
|
||||||
@ -101,29 +91,31 @@ task test_slim, "Runs the (slimmed down) test suite":
|
|||||||
exec "nimble testfilter"
|
exec "nimble testfilter"
|
||||||
exec "nimble examples_build"
|
exec "nimble examples_build"
|
||||||
|
|
||||||
|
task website, "Build the website":
|
||||||
|
tutorialToMd("examples/tutorial_1_connect.nim")
|
||||||
|
tutorialToMd("examples/tutorial_2_customproto.nim")
|
||||||
|
tutorialToMd("examples/tutorial_3_protobuf.nim")
|
||||||
|
tutorialToMd("examples/tutorial_4_gossipsub.nim")
|
||||||
|
tutorialToMd("examples/tutorial_5_discovery.nim")
|
||||||
|
tutorialToMd("examples/tutorial_6_game.nim")
|
||||||
|
tutorialToMd("examples/circuitrelay.nim")
|
||||||
|
exec "mkdocs build"
|
||||||
|
|
||||||
task examples_build, "Build the samples":
|
task examples_build, "Build the samples":
|
||||||
buildSample("directchat")
|
buildSample("directchat")
|
||||||
buildSample("helloworld", true)
|
buildSample("helloworld", true)
|
||||||
buildSample("circuitrelay", true)
|
buildSample("circuitrelay", true)
|
||||||
buildTutorial("examples/tutorial_1_connect.md")
|
buildSample("tutorial_1_connect", true)
|
||||||
buildTutorial("examples/tutorial_2_customproto.md")
|
buildSample("tutorial_2_customproto", true)
|
||||||
|
if (NimMajor, NimMinor) > (1, 2):
|
||||||
proc tutorialToHtml(source, output: string) =
|
# These tutorials relies on post 1.4 exception tracking
|
||||||
var html = gorge("./nimbledeps/bin/markdown < " & source)
|
buildSample("tutorial_3_protobuf", true)
|
||||||
html &= """
|
buildSample("tutorial_4_gossipsub", true)
|
||||||
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/water.css@2/out/water.css">
|
buildSample("tutorial_5_discovery", true)
|
||||||
<link rel="stylesheet" href="https://unpkg.com/@highlightjs/cdn-assets@11.5.1/styles/default.min.css">
|
# Nico doesn't work in 1.2
|
||||||
<script src="https://unpkg.com/@highlightjs/cdn-assets@11.5.1/highlight.min.js"></script>
|
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
|
||||||
<script src="https://unpkg.com/@highlightjs/cdn-assets@11.5.1/languages/nim.min.js"></script>
|
exec "nimble install -y nico"
|
||||||
<script>hljs.highlightAll();</script>
|
buildSample("tutorial_6_game", false, "--styleCheck:off")
|
||||||
"""
|
|
||||||
writeFile(output, html)
|
|
||||||
|
|
||||||
|
|
||||||
task markdown_to_html, "Build the tutorials HTML":
|
|
||||||
exec "nimble install -y markdown"
|
|
||||||
tutorialToHtml("examples/tutorial_1_connect.md", "tuto1.html")
|
|
||||||
tutorialToHtml("examples/tutorial_2_customproto.md", "tuto2.html")
|
|
||||||
|
|
||||||
# pin system
|
# pin system
|
||||||
# while nimble lockfile
|
# while nimble lockfile
|
||||||
@ -148,9 +140,13 @@ task install_pinned, "Reads the lockfile":
|
|||||||
|
|
||||||
# Remove the automatically installed deps
|
# Remove the automatically installed deps
|
||||||
# (inefficient you say?)
|
# (inefficient you say?)
|
||||||
let allowedDirectories = toInstall.mapIt(it[0] & "-" & it[1].split('@')[1])
|
let nimblePkgs =
|
||||||
for dependency in listDirs("nimbledeps/pkgs"):
|
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
|
||||||
if dependency.extractFilename notin allowedDirectories:
|
else: "nimbledeps/pkgs2"
|
||||||
|
for dependency in listDirs(nimblePkgs):
|
||||||
|
let filename = dependency.extractFilename
|
||||||
|
if toInstall.anyIt(filename.startsWith(it[0]) and
|
||||||
|
filename.endsWith(it[1].split('#')[^1])) == false:
|
||||||
rmDir(dependency)
|
rmDir(dependency)
|
||||||
|
|
||||||
task unpin, "Restore global package use":
|
task unpin, "Restore global package use":
|
||||||
|
@ -26,8 +26,8 @@ import
|
|||||||
switch, peerid, peerinfo, stream/connection, multiaddress,
|
switch, peerid, peerinfo, stream/connection, multiaddress,
|
||||||
crypto/crypto, transports/[transport, tcptransport],
|
crypto/crypto, transports/[transport, tcptransport],
|
||||||
muxers/[muxer, mplex/mplex, yamux/yamux],
|
muxers/[muxer, mplex/mplex, yamux/yamux],
|
||||||
protocols/[identify, secure/secure, secure/noise],
|
protocols/[identify, secure/secure, secure/noise, rendezvous],
|
||||||
protocols/relay/[relay, client, rtransport],
|
protocols/connectivity/[autonat, relay/relay, relay/client, relay/rtransport],
|
||||||
connmanager, upgrademngrs/muxedupgrade,
|
connmanager, upgrademngrs/muxedupgrade,
|
||||||
nameresolving/nameresolver,
|
nameresolving/nameresolver,
|
||||||
errors, utility
|
errors, utility
|
||||||
@ -58,7 +58,10 @@ type
|
|||||||
agentVersion: string
|
agentVersion: string
|
||||||
nameResolver: NameResolver
|
nameResolver: NameResolver
|
||||||
peerStoreCapacity: Option[int]
|
peerStoreCapacity: Option[int]
|
||||||
|
autonat: bool
|
||||||
circuitRelay: Relay
|
circuitRelay: Relay
|
||||||
|
rdv: RendezVous
|
||||||
|
services: seq[Service]
|
||||||
|
|
||||||
proc new*(T: type[SwitchBuilder]): T {.public.} =
|
proc new*(T: type[SwitchBuilder]): T {.public.} =
|
||||||
## Creates a SwitchBuilder
|
## Creates a SwitchBuilder
|
||||||
@ -185,10 +188,22 @@ proc withNameResolver*(b: SwitchBuilder, nameResolver: NameResolver): SwitchBuil
|
|||||||
b.nameResolver = nameResolver
|
b.nameResolver = nameResolver
|
||||||
b
|
b
|
||||||
|
|
||||||
|
proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
|
||||||
|
b.autonat = true
|
||||||
|
b
|
||||||
|
|
||||||
proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
|
proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
|
||||||
b.circuitRelay = r
|
b.circuitRelay = r
|
||||||
b
|
b
|
||||||
|
|
||||||
|
proc withRendezVous*(b: SwitchBuilder, rdv: RendezVous = RendezVous.new()): SwitchBuilder =
|
||||||
|
b.rdv = rdv
|
||||||
|
b
|
||||||
|
|
||||||
|
proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
|
||||||
|
b.services = services
|
||||||
|
b
|
||||||
|
|
||||||
proc build*(b: SwitchBuilder): Switch
|
proc build*(b: SwitchBuilder): Switch
|
||||||
{.raises: [Defect, LPError], public.} =
|
{.raises: [Defect, LPError], public.} =
|
||||||
|
|
||||||
@ -244,7 +259,12 @@ proc build*(b: SwitchBuilder): Switch
|
|||||||
connManager = connManager,
|
connManager = connManager,
|
||||||
ms = ms,
|
ms = ms,
|
||||||
nameResolver = b.nameResolver,
|
nameResolver = b.nameResolver,
|
||||||
peerStore = peerStore)
|
peerStore = peerStore,
|
||||||
|
services = b.services)
|
||||||
|
|
||||||
|
if b.autonat:
|
||||||
|
let autonat = Autonat.new(switch)
|
||||||
|
switch.mount(autonat)
|
||||||
|
|
||||||
if not isNil(b.circuitRelay):
|
if not isNil(b.circuitRelay):
|
||||||
if b.circuitRelay of RelayClient:
|
if b.circuitRelay of RelayClient:
|
||||||
@ -252,6 +272,10 @@ proc build*(b: SwitchBuilder): Switch
|
|||||||
b.circuitRelay.setup(switch)
|
b.circuitRelay.setup(switch)
|
||||||
switch.mount(b.circuitRelay)
|
switch.mount(b.circuitRelay)
|
||||||
|
|
||||||
|
if not isNil(b.rdv):
|
||||||
|
b.rdv.setup(switch)
|
||||||
|
switch.mount(b.rdv)
|
||||||
|
|
||||||
return switch
|
return switch
|
||||||
|
|
||||||
proc newStandardSwitch*(
|
proc newStandardSwitch*(
|
||||||
|
@ -110,6 +110,13 @@ proc new*(C: type ConnManager,
|
|||||||
proc connCount*(c: ConnManager, peerId: PeerId): int =
|
proc connCount*(c: ConnManager, peerId: PeerId): int =
|
||||||
c.conns.getOrDefault(peerId).len
|
c.conns.getOrDefault(peerId).len
|
||||||
|
|
||||||
|
proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
|
||||||
|
var peers = newSeq[PeerId]()
|
||||||
|
for peerId, conns in c.conns:
|
||||||
|
if conns.anyIt(it.dir == dir):
|
||||||
|
peers.add(peerId)
|
||||||
|
return peers
|
||||||
|
|
||||||
proc addConnEventHandler*(c: ConnManager,
|
proc addConnEventHandler*(c: ConnManager,
|
||||||
handler: ConnEventHandler,
|
handler: ConnEventHandler,
|
||||||
kind: ConnEventKind) =
|
kind: ConnEventKind) =
|
||||||
@ -537,3 +544,4 @@ proc close*(c: ConnManager) {.async.} =
|
|||||||
await conn.close()
|
await conn.close()
|
||||||
|
|
||||||
trace "Closed ConnManager"
|
trace "Closed ConnManager"
|
||||||
|
|
||||||
|
@ -31,7 +31,6 @@ const
|
|||||||
type
|
type
|
||||||
Curve25519* = object
|
Curve25519* = object
|
||||||
Curve25519Key* = array[Curve25519KeySize, byte]
|
Curve25519Key* = array[Curve25519KeySize, byte]
|
||||||
pcuchar = ptr char
|
|
||||||
Curve25519Error* = enum
|
Curve25519Error* = enum
|
||||||
Curver25519GenError
|
Curver25519GenError
|
||||||
|
|
||||||
|
@ -528,8 +528,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: 1)
|
length: 1, buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
field.vbool = (b == 0xFF'u8)
|
field.vbool = (b == 0xFF'u8)
|
||||||
ab.offset += 1
|
ab.offset += 1
|
||||||
return ok(field)
|
return ok(field)
|
||||||
@ -554,8 +553,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
# Negative or Positive integer
|
# Negative or Positive integer
|
||||||
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: int(length))
|
length: int(length), buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
|
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
|
||||||
# Negative integer
|
# Negative integer
|
||||||
if length <= 8:
|
if length <= 8:
|
||||||
@ -579,16 +577,15 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
# Zero value integer
|
# Zero value integer
|
||||||
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: int(length), vint: 0'u64)
|
length: int(length), vint: 0'u64,
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
buffer: ab.buffer)
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
else:
|
else:
|
||||||
# Positive integer with leading zero
|
# Positive integer with leading zero
|
||||||
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset) + 1,
|
index: ttag, offset: int(ab.offset) + 1,
|
||||||
length: int(length) - 1)
|
length: int(length) - 1, buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
if length <= 9:
|
if length <= 9:
|
||||||
for i in 1 ..< int(length):
|
for i in 1 ..< int(length):
|
||||||
field.vint = (field.vint shl 8) or
|
field.vint = (field.vint shl 8) or
|
||||||
@ -610,8 +607,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
# Zero-length BIT STRING.
|
# Zero-length BIT STRING.
|
||||||
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset + 1),
|
index: ttag, offset: int(ab.offset + 1),
|
||||||
length: 0, ubits: 0)
|
length: 0, ubits: 0, buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
@ -631,8 +627,8 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset + 1),
|
index: ttag, offset: int(ab.offset + 1),
|
||||||
length: int(length - 1), ubits: int(unused))
|
length: int(length - 1), ubits: int(unused),
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
buffer: ab.buffer)
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
@ -643,8 +639,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: int(length))
|
length: int(length), buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
@ -654,8 +649,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
return err(Asn1Error.Incorrect)
|
return err(Asn1Error.Incorrect)
|
||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
|
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
|
||||||
offset: int(ab.offset), length: 0)
|
offset: int(ab.offset), length: 0, buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
@ -666,8 +660,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: int(length))
|
length: int(length), buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
@ -678,8 +671,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
|
|||||||
|
|
||||||
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
|
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
|
||||||
index: ttag, offset: int(ab.offset),
|
index: ttag, offset: int(ab.offset),
|
||||||
length: int(length))
|
length: int(length), buffer: ab.buffer)
|
||||||
shallowCopy(field.buffer, ab.buffer)
|
|
||||||
ab.offset += int(length)
|
ab.offset += int(length)
|
||||||
return ok(field)
|
return ok(field)
|
||||||
|
|
||||||
|
@ -492,7 +492,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
|
|||||||
res = PB.getUVarint(buffer.toOpenArray(0, i), length, size)
|
res = PB.getUVarint(buffer.toOpenArray(0, i), length, size)
|
||||||
if res.isOk():
|
if res.isOk():
|
||||||
break
|
break
|
||||||
if res.isErr() or size > MaxMessageSize:
|
if res.isErr() or size > 1'u shl 22:
|
||||||
buffer.setLen(0)
|
buffer.setLen(0)
|
||||||
result = buffer
|
result = buffer
|
||||||
return
|
return
|
||||||
|
@ -13,10 +13,13 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import chronos
|
import chronos
|
||||||
|
import stew/results
|
||||||
import peerid,
|
import peerid,
|
||||||
stream/connection,
|
stream/connection,
|
||||||
transports/transport
|
transports/transport
|
||||||
|
|
||||||
|
export results
|
||||||
|
|
||||||
type
|
type
|
||||||
Dial* = ref object of RootObj
|
Dial* = ref object of RootObj
|
||||||
|
|
||||||
@ -31,6 +34,14 @@ method connect*(
|
|||||||
|
|
||||||
doAssert(false, "Not implemented!")
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
|
method connect*(
|
||||||
|
self: Dial,
|
||||||
|
address: MultiAddress,
|
||||||
|
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
|
||||||
|
## Connects to a peer and retrieve its PeerId
|
||||||
|
|
||||||
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dial,
|
self: Dial,
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
@ -58,3 +69,9 @@ method addTransport*(
|
|||||||
self: Dial,
|
self: Dial,
|
||||||
transport: Transport) {.base.} =
|
transport: Transport) {.base.} =
|
||||||
doAssert(false, "Not implemented!")
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
|
method tryDial*(
|
||||||
|
self: Dial,
|
||||||
|
peerId: PeerId,
|
||||||
|
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async, base.} =
|
||||||
|
doAssert(false, "Not implemented!")
|
||||||
|
@ -7,8 +7,9 @@
|
|||||||
# This file may not be copied, modified, or distributed except according to
|
# This file may not be copied, modified, or distributed except according to
|
||||||
# those terms.
|
# those terms.
|
||||||
|
|
||||||
import std/[sugar, tables]
|
import std/[sugar, tables, sequtils]
|
||||||
|
|
||||||
|
import stew/results
|
||||||
import pkg/[chronos,
|
import pkg/[chronos,
|
||||||
chronicles,
|
chronicles,
|
||||||
metrics]
|
metrics]
|
||||||
@ -16,14 +17,16 @@ import pkg/[chronos,
|
|||||||
import dial,
|
import dial,
|
||||||
peerid,
|
peerid,
|
||||||
peerinfo,
|
peerinfo,
|
||||||
|
multicodec,
|
||||||
multistream,
|
multistream,
|
||||||
connmanager,
|
connmanager,
|
||||||
stream/connection,
|
stream/connection,
|
||||||
transports/transport,
|
transports/transport,
|
||||||
nameresolving/nameresolver,
|
nameresolving/nameresolver,
|
||||||
|
upgrademngrs/upgrade,
|
||||||
errors
|
errors
|
||||||
|
|
||||||
export dial, errors
|
export dial, errors, results
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p dialer"
|
topics = "libp2p dialer"
|
||||||
@ -46,74 +49,122 @@ type
|
|||||||
|
|
||||||
proc dialAndUpgrade(
|
proc dialAndUpgrade(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerId,
|
peerId: Opt[PeerId],
|
||||||
|
hostname: string,
|
||||||
|
address: MultiAddress):
|
||||||
|
Future[Connection] {.async.} =
|
||||||
|
|
||||||
|
for transport in self.transports: # for each transport
|
||||||
|
if transport.handles(address): # check if it can dial it
|
||||||
|
trace "Dialing address", address, peerId, hostname
|
||||||
|
let dialed =
|
||||||
|
try:
|
||||||
|
libp2p_total_dial_attempts.inc()
|
||||||
|
await transport.dial(hostname, address)
|
||||||
|
except CancelledError as exc:
|
||||||
|
debug "Dialing canceled", msg = exc.msg, peerId
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
debug "Dialing failed", msg = exc.msg, peerId
|
||||||
|
libp2p_failed_dials.inc()
|
||||||
|
return nil # Try the next address
|
||||||
|
|
||||||
|
# also keep track of the connection's bottom unsafe transport direction
|
||||||
|
# required by gossipsub scoring
|
||||||
|
dialed.transportDir = Direction.Out
|
||||||
|
|
||||||
|
libp2p_successful_dials.inc()
|
||||||
|
|
||||||
|
let conn =
|
||||||
|
try:
|
||||||
|
await transport.upgradeOutgoing(dialed, peerId)
|
||||||
|
except CatchableError as exc:
|
||||||
|
# If we failed to establish the connection through one transport,
|
||||||
|
# we won't succeeded through another - no use in trying again
|
||||||
|
await dialed.close()
|
||||||
|
debug "Upgrade failed", msg = exc.msg, peerId
|
||||||
|
if exc isnot CancelledError:
|
||||||
|
libp2p_failed_upgrades_outgoing.inc()
|
||||||
|
|
||||||
|
# Try other address
|
||||||
|
return nil
|
||||||
|
|
||||||
|
doAssert not isNil(conn), "connection died after upgradeOutgoing"
|
||||||
|
debug "Dial successful", conn, peerId = conn.peerId
|
||||||
|
return conn
|
||||||
|
return nil
|
||||||
|
|
||||||
|
proc expandDnsAddr(
|
||||||
|
self: Dialer,
|
||||||
|
peerId: Opt[PeerId],
|
||||||
|
address: MultiAddress): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
|
||||||
|
|
||||||
|
if not DNSADDR.matchPartial(address): return @[(address, peerId)]
|
||||||
|
if isNil(self.nameResolver):
|
||||||
|
info "Can't resolve DNSADDR without NameResolver", ma=address
|
||||||
|
return @[]
|
||||||
|
|
||||||
|
let
|
||||||
|
toResolve =
|
||||||
|
if peerId.isSome:
|
||||||
|
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
|
||||||
|
else:
|
||||||
|
address
|
||||||
|
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
|
||||||
|
|
||||||
|
for resolvedAddress in resolved:
|
||||||
|
let lastPart = resolvedAddress[^1].tryGet()
|
||||||
|
if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
|
||||||
|
let
|
||||||
|
peerIdBytes = lastPart.protoArgument().tryGet()
|
||||||
|
addrPeerId = PeerId.init(peerIdBytes).tryGet()
|
||||||
|
result.add((resolvedAddress[0..^2].tryGet(), Opt.some(addrPeerId)))
|
||||||
|
else:
|
||||||
|
result.add((resolvedAddress, peerId))
|
||||||
|
|
||||||
|
proc dialAndUpgrade(
|
||||||
|
self: Dialer,
|
||||||
|
peerId: Opt[PeerId],
|
||||||
addrs: seq[MultiAddress]):
|
addrs: seq[MultiAddress]):
|
||||||
Future[Connection] {.async.} =
|
Future[Connection] {.async.} =
|
||||||
|
|
||||||
debug "Dialing peer", peerId
|
debug "Dialing peer", peerId
|
||||||
|
|
||||||
for address in addrs: # for each address
|
for rawAddress in addrs:
|
||||||
let
|
# resolve potential dnsaddr
|
||||||
hostname = address.getHostname()
|
let addresses = await self.expandDnsAddr(peerId, rawAddress)
|
||||||
resolvedAddresses =
|
|
||||||
if isNil(self.nameResolver): @[address]
|
|
||||||
else: await self.nameResolver.resolveMAddress(address)
|
|
||||||
|
|
||||||
for a in resolvedAddresses: # for each resolved address
|
for (expandedAddress, addrPeerId) in addresses:
|
||||||
for transport in self.transports: # for each transport
|
# DNS resolution
|
||||||
if transport.handles(a): # check if it can dial it
|
let
|
||||||
trace "Dialing address", address = $a, peerId, hostname
|
hostname = expandedAddress.getHostname()
|
||||||
let dialed = try:
|
resolvedAddresses =
|
||||||
libp2p_total_dial_attempts.inc()
|
if isNil(self.nameResolver): @[expandedAddress]
|
||||||
await transport.dial(hostname, a)
|
else: await self.nameResolver.resolveMAddress(expandedAddress)
|
||||||
except CancelledError as exc:
|
|
||||||
debug "Dialing canceled", msg = exc.msg, peerId
|
|
||||||
raise exc
|
|
||||||
except CatchableError as exc:
|
|
||||||
debug "Dialing failed", msg = exc.msg, peerId
|
|
||||||
libp2p_failed_dials.inc()
|
|
||||||
continue # Try the next address
|
|
||||||
|
|
||||||
# make sure to assign the peer to the connection
|
for resolvedAddress in resolvedAddresses:
|
||||||
dialed.peerId = peerId
|
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress)
|
||||||
|
if not isNil(result):
|
||||||
# also keep track of the connection's bottom unsafe transport direction
|
return result
|
||||||
# required by gossipsub scoring
|
|
||||||
dialed.transportDir = Direction.Out
|
|
||||||
|
|
||||||
libp2p_successful_dials.inc()
|
|
||||||
|
|
||||||
let conn = try:
|
|
||||||
await transport.upgradeOutgoing(dialed)
|
|
||||||
except CatchableError as exc:
|
|
||||||
# If we failed to establish the connection through one transport,
|
|
||||||
# we won't succeeded through another - no use in trying again
|
|
||||||
# TODO we should try another address though
|
|
||||||
await dialed.close()
|
|
||||||
debug "Upgrade failed", msg = exc.msg, peerId
|
|
||||||
if exc isnot CancelledError:
|
|
||||||
libp2p_failed_upgrades_outgoing.inc()
|
|
||||||
raise exc
|
|
||||||
|
|
||||||
doAssert not isNil(conn), "connection died after upgradeOutgoing"
|
|
||||||
debug "Dial successful", conn, peerId = conn.peerId
|
|
||||||
return conn
|
|
||||||
|
|
||||||
proc internalConnect(
|
proc internalConnect(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerId,
|
peerId: Opt[PeerId],
|
||||||
addrs: seq[MultiAddress],
|
addrs: seq[MultiAddress],
|
||||||
forceDial: bool):
|
forceDial: bool):
|
||||||
Future[Connection] {.async.} =
|
Future[Connection] {.async.} =
|
||||||
if self.localPeerId == peerId:
|
if Opt.some(self.localPeerId) == peerId:
|
||||||
raise newException(CatchableError, "can't dial self!")
|
raise newException(CatchableError, "can't dial self!")
|
||||||
|
|
||||||
# Ensure there's only one in-flight attempt per peer
|
# Ensure there's only one in-flight attempt per peer
|
||||||
let lock = self.dialLock.mgetOrPut(peerId, newAsyncLock())
|
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
|
||||||
try:
|
try:
|
||||||
await lock.acquire()
|
await lock.acquire()
|
||||||
|
|
||||||
# Check if we have a connection already and try to reuse it
|
# Check if we have a connection already and try to reuse it
|
||||||
var conn = self.connManager.selectConn(peerId)
|
var conn =
|
||||||
|
if peerId.isSome: self.connManager.selectConn(peerId.get())
|
||||||
|
else: nil
|
||||||
if conn != nil:
|
if conn != nil:
|
||||||
if conn.atEof or conn.closed:
|
if conn.atEof or conn.closed:
|
||||||
# This connection should already have been removed from the connection
|
# This connection should already have been removed from the connection
|
||||||
@ -164,7 +215,27 @@ method connect*(
|
|||||||
if self.connManager.connCount(peerId) > 0:
|
if self.connManager.connCount(peerId) > 0:
|
||||||
return
|
return
|
||||||
|
|
||||||
discard await self.internalConnect(peerId, addrs, forceDial)
|
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial)
|
||||||
|
|
||||||
|
method connect*(
|
||||||
|
self: Dialer,
|
||||||
|
address: MultiAddress,
|
||||||
|
allowUnknownPeerId = false): Future[PeerId] {.async.} =
|
||||||
|
## Connects to a peer and retrieve its PeerId
|
||||||
|
|
||||||
|
let fullAddress = parseFullAddress(address)
|
||||||
|
if fullAddress.isOk:
|
||||||
|
return (await self.internalConnect(
|
||||||
|
Opt.some(fullAddress.get()[0]),
|
||||||
|
@[fullAddress.get()[1]],
|
||||||
|
false)).peerId
|
||||||
|
else:
|
||||||
|
if allowUnknownPeerId == false:
|
||||||
|
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
|
||||||
|
return (await self.internalConnect(
|
||||||
|
Opt.none(PeerId),
|
||||||
|
@[address],
|
||||||
|
false)).peerId
|
||||||
|
|
||||||
proc negotiateStream(
|
proc negotiateStream(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
@ -178,6 +249,27 @@ proc negotiateStream(
|
|||||||
|
|
||||||
return conn
|
return conn
|
||||||
|
|
||||||
|
method tryDial*(
|
||||||
|
self: Dialer,
|
||||||
|
peerId: PeerId,
|
||||||
|
addrs: seq[MultiAddress]): Future[Opt[MultiAddress]] {.async.} =
|
||||||
|
## Create a protocol stream in order to check
|
||||||
|
## if a connection is possible.
|
||||||
|
## Doesn't use the Connection Manager to save it.
|
||||||
|
##
|
||||||
|
|
||||||
|
trace "Check if it can dial", peerId, addrs
|
||||||
|
try:
|
||||||
|
let conn = await self.dialAndUpgrade(Opt.some(peerId), addrs)
|
||||||
|
if conn.isNil():
|
||||||
|
raise newException(DialFailedError, "No valid multiaddress")
|
||||||
|
await conn.close()
|
||||||
|
return conn.observedAddr
|
||||||
|
except CancelledError as exc:
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
raise newException(DialFailedError, exc.msg)
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
self: Dialer,
|
self: Dialer,
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
@ -216,7 +308,7 @@ method dial*(
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
trace "Dialing (new)", peerId, protos
|
trace "Dialing (new)", peerId, protos
|
||||||
conn = await self.internalConnect(peerId, addrs, forceDial)
|
conn = await self.internalConnect(Opt.some(peerId), addrs, forceDial)
|
||||||
trace "Opening stream", conn
|
trace "Opening stream", conn
|
||||||
stream = await self.connManager.getStream(conn)
|
stream = await self.connManager.getStream(conn)
|
||||||
|
|
||||||
|
182
libp2p/discovery/discoverymngr.nim
Normal file
182
libp2p/discovery/discoverymngr.nim
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import std/sequtils
|
||||||
|
import chronos, chronicles, stew/results
|
||||||
|
import ../errors
|
||||||
|
|
||||||
|
type
|
||||||
|
BaseAttr = ref object of RootObj
|
||||||
|
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
|
||||||
|
|
||||||
|
Attribute[T] = ref object of BaseAttr
|
||||||
|
value: T
|
||||||
|
|
||||||
|
PeerAttributes* = object
|
||||||
|
attributes: seq[BaseAttr]
|
||||||
|
|
||||||
|
DiscoveryService* = distinct string
|
||||||
|
|
||||||
|
proc `==`*(a, b: DiscoveryService): bool {.borrow.}
|
||||||
|
|
||||||
|
proc ofType*[T](f: BaseAttr, _: type[T]): bool =
|
||||||
|
return f of Attribute[T]
|
||||||
|
|
||||||
|
proc to*[T](f: BaseAttr, _: type[T]): T =
|
||||||
|
Attribute[T](f).value
|
||||||
|
|
||||||
|
proc add*[T](pa: var PeerAttributes,
|
||||||
|
value: T) =
|
||||||
|
pa.attributes.add(Attribute[T](
|
||||||
|
value: value,
|
||||||
|
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
|
||||||
|
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
iterator items*(pa: PeerAttributes): BaseAttr =
|
||||||
|
for f in pa.attributes:
|
||||||
|
yield f
|
||||||
|
|
||||||
|
proc getAll*[T](pa: PeerAttributes, t: typedesc[T]): seq[T] =
|
||||||
|
for f in pa.attributes:
|
||||||
|
if f.ofType(T):
|
||||||
|
result.add(f.to(T))
|
||||||
|
|
||||||
|
proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
|
||||||
|
for f in pa.attributes:
|
||||||
|
if f.ofType(T):
|
||||||
|
return Opt.some(f.to(T))
|
||||||
|
Opt.none(T)
|
||||||
|
|
||||||
|
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
|
||||||
|
pa{T}.valueOr: raise newException(KeyError, "Attritute not found")
|
||||||
|
|
||||||
|
proc match*(pa, candidate: PeerAttributes): bool =
|
||||||
|
for f in pa.attributes:
|
||||||
|
block oneAttribute:
|
||||||
|
for field in candidate.attributes:
|
||||||
|
if field.comparator(field, f):
|
||||||
|
break oneAttribute
|
||||||
|
return false
|
||||||
|
return true
|
||||||
|
|
||||||
|
type
|
||||||
|
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
|
||||||
|
|
||||||
|
DiscoveryInterface* = ref object of RootObj
|
||||||
|
onPeerFound*: PeerFoundCallback
|
||||||
|
toAdvertise*: PeerAttributes
|
||||||
|
advertisementUpdated*: AsyncEvent
|
||||||
|
advertiseLoop*: Future[void]
|
||||||
|
|
||||||
|
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
|
||||||
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
|
method advertise*(self: DiscoveryInterface) {.async, base.} =
|
||||||
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
|
type
|
||||||
|
DiscoveryError* = object of LPError
|
||||||
|
DiscoveryFinished* = object of LPError
|
||||||
|
|
||||||
|
DiscoveryQuery* = ref object
|
||||||
|
attr: PeerAttributes
|
||||||
|
peers: AsyncQueue[PeerAttributes]
|
||||||
|
finished: bool
|
||||||
|
futs: seq[Future[void]]
|
||||||
|
|
||||||
|
DiscoveryManager* = ref object
|
||||||
|
interfaces: seq[DiscoveryInterface]
|
||||||
|
queries: seq[DiscoveryQuery]
|
||||||
|
|
||||||
|
proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
|
||||||
|
dm.interfaces &= di
|
||||||
|
|
||||||
|
di.onPeerFound = proc (pa: PeerAttributes) =
|
||||||
|
for query in dm.queries:
|
||||||
|
if query.attr.match(pa):
|
||||||
|
try:
|
||||||
|
query.peers.putNoWait(pa)
|
||||||
|
except AsyncQueueFullError as exc:
|
||||||
|
debug "Cannot push discovered peer to queue"
|
||||||
|
|
||||||
|
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
|
||||||
|
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
|
||||||
|
for i in dm.interfaces:
|
||||||
|
query.futs.add(i.request(pa))
|
||||||
|
dm.queries.add(query)
|
||||||
|
dm.queries.keepItIf(it.futs.anyIt(not it.finished()))
|
||||||
|
return query
|
||||||
|
|
||||||
|
proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
|
||||||
|
var pa: PeerAttributes
|
||||||
|
pa.add(value)
|
||||||
|
return dm.request(pa)
|
||||||
|
|
||||||
|
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
|
||||||
|
for i in dm.interfaces:
|
||||||
|
i.toAdvertise = pa
|
||||||
|
if i.advertiseLoop.isNil:
|
||||||
|
i.advertisementUpdated = newAsyncEvent()
|
||||||
|
i.advertiseLoop = i.advertise()
|
||||||
|
else:
|
||||||
|
i.advertisementUpdated.fire()
|
||||||
|
|
||||||
|
proc advertise*[T](dm: DiscoveryManager, value: T) =
|
||||||
|
var pa: PeerAttributes
|
||||||
|
pa.add(value)
|
||||||
|
dm.advertise(pa)
|
||||||
|
|
||||||
|
template forEach*(query: DiscoveryQuery, code: untyped) =
|
||||||
|
## Will execute `code` for each discovered peer. The
|
||||||
|
## peer attritubtes are available through the variable
|
||||||
|
## `peer`
|
||||||
|
|
||||||
|
proc forEachInternal(q: DiscoveryQuery) {.async.} =
|
||||||
|
while true:
|
||||||
|
let peer {.inject.} =
|
||||||
|
try: await q.getPeer()
|
||||||
|
except DiscoveryFinished: return
|
||||||
|
code
|
||||||
|
|
||||||
|
asyncSpawn forEachInternal(query)
|
||||||
|
|
||||||
|
proc stop*(query: DiscoveryQuery) =
|
||||||
|
query.finished = true
|
||||||
|
for r in query.futs:
|
||||||
|
if not r.finished(): r.cancel()
|
||||||
|
|
||||||
|
proc stop*(dm: DiscoveryManager) =
|
||||||
|
for q in dm.queries:
|
||||||
|
q.stop()
|
||||||
|
for i in dm.interfaces:
|
||||||
|
if isNil(i.advertiseLoop): continue
|
||||||
|
i.advertiseLoop.cancel()
|
||||||
|
|
||||||
|
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
|
||||||
|
let getter = query.peers.popFirst()
|
||||||
|
|
||||||
|
try:
|
||||||
|
await getter or allFinished(query.futs)
|
||||||
|
except CancelledError as exc:
|
||||||
|
getter.cancel()
|
||||||
|
raise exc
|
||||||
|
|
||||||
|
if not finished(getter):
|
||||||
|
if query.finished:
|
||||||
|
raise newException(DiscoveryFinished, "Discovery query stopped")
|
||||||
|
# discovery loops only finish when they don't handle the query
|
||||||
|
raise newException(DiscoveryError, "Unable to find any peer matching this request")
|
||||||
|
return await getter
|
77
libp2p/discovery/rendezvousinterface.nim
Normal file
77
libp2p/discovery/rendezvousinterface.nim
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import sequtils
|
||||||
|
import chronos
|
||||||
|
import ./discoverymngr,
|
||||||
|
../protocols/rendezvous,
|
||||||
|
../peerid
|
||||||
|
|
||||||
|
type
|
||||||
|
RendezVousInterface* = ref object of DiscoveryInterface
|
||||||
|
rdv*: RendezVous
|
||||||
|
timeToRequest: Duration
|
||||||
|
timeToAdvertise: Duration
|
||||||
|
|
||||||
|
RdvNamespace* = distinct string
|
||||||
|
|
||||||
|
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
|
||||||
|
|
||||||
|
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
|
||||||
|
var namespace = ""
|
||||||
|
for attr in pa:
|
||||||
|
if attr.ofType(RdvNamespace):
|
||||||
|
namespace = string attr.to(RdvNamespace)
|
||||||
|
elif attr.ofType(DiscoveryService):
|
||||||
|
namespace = string attr.to(DiscoveryService)
|
||||||
|
elif attr.ofType(PeerId):
|
||||||
|
namespace = $attr.to(PeerId)
|
||||||
|
else:
|
||||||
|
# unhandled type
|
||||||
|
return
|
||||||
|
while true:
|
||||||
|
for pr in await self.rdv.request(namespace):
|
||||||
|
var peer: PeerAttributes
|
||||||
|
peer.add(pr.peerId)
|
||||||
|
for address in pr.addresses:
|
||||||
|
peer.add(address.address)
|
||||||
|
|
||||||
|
peer.add(DiscoveryService(namespace))
|
||||||
|
peer.add(RdvNamespace(namespace))
|
||||||
|
self.onPeerFound(peer)
|
||||||
|
|
||||||
|
await sleepAsync(self.timeToRequest)
|
||||||
|
|
||||||
|
method advertise*(self: RendezVousInterface) {.async.} =
|
||||||
|
while true:
|
||||||
|
var toAdvertise: seq[string]
|
||||||
|
for attr in self.toAdvertise:
|
||||||
|
if attr.ofType(RdvNamespace):
|
||||||
|
toAdvertise.add string attr.to(RdvNamespace)
|
||||||
|
elif attr.ofType(DiscoveryService):
|
||||||
|
toAdvertise.add string attr.to(DiscoveryService)
|
||||||
|
elif attr.ofType(PeerId):
|
||||||
|
toAdvertise.add $attr.to(PeerId)
|
||||||
|
|
||||||
|
self.advertisementUpdated.clear()
|
||||||
|
for toAdv in toAdvertise:
|
||||||
|
await self.rdv.advertise(toAdv, self.timeToAdvertise)
|
||||||
|
|
||||||
|
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
|
||||||
|
|
||||||
|
proc new*(T: typedesc[RendezVousInterface],
|
||||||
|
rdv: RendezVous,
|
||||||
|
ttr: Duration = 1.minutes,
|
||||||
|
tta: Duration = MinimumDuration): RendezVousInterface =
|
||||||
|
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
|
@ -222,6 +222,40 @@ proc onionVB(vb: var VBuffer): bool =
|
|||||||
if vb.readArray(buf) == 12:
|
if vb.readArray(buf) == 12:
|
||||||
result = true
|
result = true
|
||||||
|
|
||||||
|
proc onion3StB(s: string, vb: var VBuffer): bool =
|
||||||
|
try:
|
||||||
|
var parts = s.split(':')
|
||||||
|
if len(parts) != 2:
|
||||||
|
return false
|
||||||
|
if len(parts[0]) != 56:
|
||||||
|
return false
|
||||||
|
var address = Base32Lower.decode(parts[0].toLowerAscii())
|
||||||
|
var nport = parseInt(parts[1])
|
||||||
|
if (nport > 0 and nport < 65536) and len(address) == 35:
|
||||||
|
address.setLen(37)
|
||||||
|
address[35] = cast[byte]((nport shr 8) and 0xFF)
|
||||||
|
address[36] = cast[byte](nport and 0xFF)
|
||||||
|
vb.writeArray(address)
|
||||||
|
result = true
|
||||||
|
except:
|
||||||
|
discard
|
||||||
|
|
||||||
|
proc onion3BtS(vb: var VBuffer, s: var string): bool =
|
||||||
|
## ONION address bufferToString() implementation.
|
||||||
|
var buf: array[37, byte]
|
||||||
|
if vb.readArray(buf) == 37:
|
||||||
|
var nport = (cast[uint16](buf[35]) shl 8) or cast[uint16](buf[36])
|
||||||
|
s = Base32Lower.encode(buf.toOpenArray(0, 34))
|
||||||
|
s.add(":")
|
||||||
|
s.add($nport)
|
||||||
|
result = true
|
||||||
|
|
||||||
|
proc onion3VB(vb: var VBuffer): bool =
|
||||||
|
## ONION address validateBuffer() implementation.
|
||||||
|
var buf: array[37, byte]
|
||||||
|
if vb.readArray(buf) == 37:
|
||||||
|
result = true
|
||||||
|
|
||||||
proc unixStB(s: string, vb: var VBuffer): bool =
|
proc unixStB(s: string, vb: var VBuffer): bool =
|
||||||
## Unix socket name stringToBuffer() implementation.
|
## Unix socket name stringToBuffer() implementation.
|
||||||
if len(s) > 0:
|
if len(s) > 0:
|
||||||
@ -310,6 +344,11 @@ const
|
|||||||
bufferToString: onionBtS,
|
bufferToString: onionBtS,
|
||||||
validateBuffer: onionVB
|
validateBuffer: onionVB
|
||||||
)
|
)
|
||||||
|
TranscoderOnion3* = Transcoder(
|
||||||
|
stringToBuffer: onion3StB,
|
||||||
|
bufferToString: onion3BtS,
|
||||||
|
validateBuffer: onion3VB
|
||||||
|
)
|
||||||
TranscoderDNS* = Transcoder(
|
TranscoderDNS* = Transcoder(
|
||||||
stringToBuffer: dnsStB,
|
stringToBuffer: dnsStB,
|
||||||
bufferToString: dnsBtS,
|
bufferToString: dnsBtS,
|
||||||
@ -363,6 +402,10 @@ const
|
|||||||
mcodec: multiCodec("onion"), kind: Fixed, size: 10,
|
mcodec: multiCodec("onion"), kind: Fixed, size: 10,
|
||||||
coder: TranscoderOnion
|
coder: TranscoderOnion
|
||||||
),
|
),
|
||||||
|
MAProtocol(
|
||||||
|
mcodec: multiCodec("onion3"), kind: Fixed, size: 37,
|
||||||
|
coder: TranscoderOnion3
|
||||||
|
),
|
||||||
MAProtocol(
|
MAProtocol(
|
||||||
mcodec: multiCodec("ws"), kind: Marker, size: 0
|
mcodec: multiCodec("ws"), kind: Marker, size: 0
|
||||||
),
|
),
|
||||||
@ -427,6 +470,8 @@ const
|
|||||||
WS* = mapAnd(TCP, mapEq("ws"))
|
WS* = mapAnd(TCP, mapEq("ws"))
|
||||||
WSS* = mapAnd(TCP, mapEq("wss"))
|
WSS* = mapAnd(TCP, mapEq("wss"))
|
||||||
WebSockets* = mapOr(WS, WSS)
|
WebSockets* = mapOr(WS, WSS)
|
||||||
|
Onion3* = mapEq("onion3")
|
||||||
|
TcpOnion3* = mapAnd(TCP, Onion3)
|
||||||
|
|
||||||
Unreliable* = mapOr(UDP)
|
Unreliable* = mapOr(UDP)
|
||||||
|
|
||||||
@ -473,15 +518,10 @@ proc trimRight(s: string, ch: char): string =
|
|||||||
break
|
break
|
||||||
result = s[0..(s.high - m)]
|
result = s[0..(s.high - m)]
|
||||||
|
|
||||||
proc shcopy*(m1: var MultiAddress, m2: MultiAddress) =
|
|
||||||
shallowCopy(m1.data.buffer, m2.data.buffer)
|
|
||||||
m1.data.offset = m2.data.offset
|
|
||||||
|
|
||||||
proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
|
proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
|
||||||
## Returns MultiAddress ``ma`` protocol code.
|
## Returns MultiAddress ``ma`` protocol code.
|
||||||
var header: uint64
|
var header: uint64
|
||||||
var vb: MultiAddress
|
var vb = ma
|
||||||
shcopy(vb, ma)
|
|
||||||
if vb.data.readVarint(header) == -1:
|
if vb.data.readVarint(header) == -1:
|
||||||
err("multiaddress: Malformed binary address!")
|
err("multiaddress: Malformed binary address!")
|
||||||
else:
|
else:
|
||||||
@ -494,8 +534,7 @@ proc protoCode*(ma: MultiAddress): MaResult[MultiCodec] =
|
|||||||
proc protoName*(ma: MultiAddress): MaResult[string] =
|
proc protoName*(ma: MultiAddress): MaResult[string] =
|
||||||
## Returns MultiAddress ``ma`` protocol name.
|
## Returns MultiAddress ``ma`` protocol name.
|
||||||
var header: uint64
|
var header: uint64
|
||||||
var vb: MultiAddress
|
var vb = ma
|
||||||
shcopy(vb, ma)
|
|
||||||
if vb.data.readVarint(header) == -1:
|
if vb.data.readVarint(header) == -1:
|
||||||
err("multiaddress: Malformed binary address!")
|
err("multiaddress: Malformed binary address!")
|
||||||
else:
|
else:
|
||||||
@ -512,9 +551,8 @@ proc protoArgument*(ma: MultiAddress,
|
|||||||
## If current MultiAddress do not have argument value, then result will be
|
## If current MultiAddress do not have argument value, then result will be
|
||||||
## ``0``.
|
## ``0``.
|
||||||
var header: uint64
|
var header: uint64
|
||||||
var vb: MultiAddress
|
var vb = ma
|
||||||
var buffer: seq[byte]
|
var buffer: seq[byte]
|
||||||
shcopy(vb, ma)
|
|
||||||
if vb.data.readVarint(header) == -1:
|
if vb.data.readVarint(header) == -1:
|
||||||
err("multiaddress: Malformed binary address!")
|
err("multiaddress: Malformed binary address!")
|
||||||
else:
|
else:
|
||||||
@ -530,7 +568,7 @@ proc protoArgument*(ma: MultiAddress,
|
|||||||
err("multiaddress: Decoding protocol error")
|
err("multiaddress: Decoding protocol error")
|
||||||
else:
|
else:
|
||||||
ok(res)
|
ok(res)
|
||||||
elif proto.kind in {Length, Path}:
|
elif proto.kind in {MAKind.Length, Path}:
|
||||||
if vb.data.readSeq(buffer) == -1:
|
if vb.data.readSeq(buffer) == -1:
|
||||||
err("multiaddress: Decoding protocol error")
|
err("multiaddress: Decoding protocol error")
|
||||||
else:
|
else:
|
||||||
@ -551,6 +589,13 @@ proc protoAddress*(ma: MultiAddress): MaResult[seq[byte]] =
|
|||||||
buffer.setLen(res)
|
buffer.setLen(res)
|
||||||
ok(buffer)
|
ok(buffer)
|
||||||
|
|
||||||
|
proc protoArgument*(ma: MultiAddress): MaResult[seq[byte]] =
|
||||||
|
## Returns MultiAddress ``ma`` protocol address binary blob.
|
||||||
|
##
|
||||||
|
## If current MultiAddress do not have argument value, then result array will
|
||||||
|
## be empty.
|
||||||
|
ma.protoAddress()
|
||||||
|
|
||||||
proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
||||||
var header: uint64
|
var header: uint64
|
||||||
var data = newSeq[byte]()
|
var data = newSeq[byte]()
|
||||||
@ -558,6 +603,9 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
|||||||
var vb = ma
|
var vb = ma
|
||||||
var res: MultiAddress
|
var res: MultiAddress
|
||||||
res.data = initVBuffer()
|
res.data = initVBuffer()
|
||||||
|
|
||||||
|
if index < 0: return err("multiaddress: negative index gived to getPart")
|
||||||
|
|
||||||
while offset <= index:
|
while offset <= index:
|
||||||
if vb.data.readVarint(header) == -1:
|
if vb.data.readVarint(header) == -1:
|
||||||
return err("multiaddress: Malformed binary address!")
|
return err("multiaddress: Malformed binary address!")
|
||||||
@ -575,7 +623,7 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
|||||||
res.data.writeVarint(header)
|
res.data.writeVarint(header)
|
||||||
res.data.writeArray(data)
|
res.data.writeArray(data)
|
||||||
res.data.finish()
|
res.data.finish()
|
||||||
elif proto.kind in {Length, Path}:
|
elif proto.kind in {MAKind.Length, Path}:
|
||||||
if vb.data.readSeq(data) == -1:
|
if vb.data.readSeq(data) == -1:
|
||||||
return err("multiaddress: Decoding protocol error")
|
return err("multiaddress: Decoding protocol error")
|
||||||
|
|
||||||
@ -590,9 +638,31 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
|||||||
inc(offset)
|
inc(offset)
|
||||||
ok(res)
|
ok(res)
|
||||||
|
|
||||||
proc `[]`*(ma: MultiAddress, i: int): MaResult[MultiAddress] {.inline.} =
|
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||||
|
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
|
||||||
|
let maLength = ? len(ma)
|
||||||
|
template normalizeIndex(index): int =
|
||||||
|
when index is BackwardsIndex: maLength - int(index)
|
||||||
|
else: int(index)
|
||||||
|
let
|
||||||
|
indexStart = normalizeIndex(slice.a)
|
||||||
|
indexEnd = normalizeIndex(slice.b)
|
||||||
|
var res: MultiAddress
|
||||||
|
for i in indexStart..indexEnd:
|
||||||
|
? res.append(? ma[i])
|
||||||
|
ok(res)
|
||||||
|
|
||||||
|
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||||
## Returns part with index ``i`` of MultiAddress ``ma``.
|
## Returns part with index ``i`` of MultiAddress ``ma``.
|
||||||
ma.getPart(i)
|
when i is BackwardsIndex:
|
||||||
|
let maLength = ? len(ma)
|
||||||
|
ma.getPart(maLength - int(i))
|
||||||
|
else:
|
||||||
|
ma.getPart(i)
|
||||||
|
|
||||||
|
proc `[]`*(ma: MultiAddress, slice: HSlice): MaResult[MultiAddress] {.inline.} =
|
||||||
|
## Returns parts with slice ``slice`` of MultiAddress ``ma``.
|
||||||
|
ma.getParts(slice)
|
||||||
|
|
||||||
iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
|
iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
|
||||||
## Iterates over all addresses inside of MultiAddress ``ma``.
|
## Iterates over all addresses inside of MultiAddress ``ma``.
|
||||||
@ -619,7 +689,7 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
|
|||||||
|
|
||||||
res.data.writeVarint(header)
|
res.data.writeVarint(header)
|
||||||
res.data.writeArray(data)
|
res.data.writeArray(data)
|
||||||
elif proto.kind in {Length, Path}:
|
elif proto.kind in {MAKind.Length, Path}:
|
||||||
if vb.data.readSeq(data) == -1:
|
if vb.data.readSeq(data) == -1:
|
||||||
yield err(MaResult[MultiAddress], "Decoding protocol error")
|
yield err(MaResult[MultiAddress], "Decoding protocol error")
|
||||||
|
|
||||||
@ -630,6 +700,13 @@ iterator items*(ma: MultiAddress): MaResult[MultiAddress] =
|
|||||||
res.data.finish()
|
res.data.finish()
|
||||||
yield ok(MaResult[MultiAddress], res)
|
yield ok(MaResult[MultiAddress], res)
|
||||||
|
|
||||||
|
proc len*(ma: MultiAddress): MaResult[int] =
|
||||||
|
var counter: int
|
||||||
|
for part in ma:
|
||||||
|
if part.isErr: return err(part.error)
|
||||||
|
counter.inc()
|
||||||
|
ok(counter)
|
||||||
|
|
||||||
proc contains*(ma: MultiAddress, codec: MultiCodec): MaResult[bool] {.inline.} =
|
proc contains*(ma: MultiAddress, codec: MultiCodec): MaResult[bool] {.inline.} =
|
||||||
## Returns ``true``, if address with MultiCodec ``codec`` present in
|
## Returns ``true``, if address with MultiCodec ``codec`` present in
|
||||||
## MultiAddress ``ma``.
|
## MultiAddress ``ma``.
|
||||||
@ -710,8 +787,7 @@ proc encode*(mbtype: typedesc[MultiBase], encoding: string,
|
|||||||
proc validate*(ma: MultiAddress): bool =
|
proc validate*(ma: MultiAddress): bool =
|
||||||
## Returns ``true`` if MultiAddress ``ma`` is valid.
|
## Returns ``true`` if MultiAddress ``ma`` is valid.
|
||||||
var header: uint64
|
var header: uint64
|
||||||
var vb: MultiAddress
|
var vb = ma
|
||||||
shcopy(vb, ma)
|
|
||||||
while true:
|
while true:
|
||||||
if vb.data.isEmpty():
|
if vb.data.isEmpty():
|
||||||
break
|
break
|
||||||
@ -1010,6 +1086,9 @@ proc `$`*(pat: MaPattern): string =
|
|||||||
elif pat.operator == Eq:
|
elif pat.operator == Eq:
|
||||||
result = $pat.value
|
result = $pat.value
|
||||||
|
|
||||||
|
proc bytes*(value: MultiAddress): seq[byte] =
|
||||||
|
value.data.buffer
|
||||||
|
|
||||||
proc write*(pb: var ProtoBuffer, field: int, value: MultiAddress) {.inline.} =
|
proc write*(pb: var ProtoBuffer, field: int, value: MultiAddress) {.inline.} =
|
||||||
write(pb, field, value.data.buffer)
|
write(pb, field, value.data.buffer)
|
||||||
|
|
||||||
|
@ -203,6 +203,7 @@ const MultiCodecList = [
|
|||||||
("p2p-webrtc-star", 0x0113), # not in multicodec list
|
("p2p-webrtc-star", 0x0113), # not in multicodec list
|
||||||
("p2p-webrtc-direct", 0x0114), # not in multicodec list
|
("p2p-webrtc-direct", 0x0114), # not in multicodec list
|
||||||
("onion", 0x01BC),
|
("onion", 0x01BC),
|
||||||
|
("onion3", 0x01BD),
|
||||||
("p2p-circuit", 0x0122),
|
("p2p-circuit", 0x0122),
|
||||||
("libp2p-peer-record", 0x0301),
|
("libp2p-peer-record", 0x0301),
|
||||||
("dns", 0x35),
|
("dns", 0x35),
|
||||||
|
@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
|
|||||||
else:
|
else:
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[strutils, sequtils]
|
import std/[strutils, sequtils, tables]
|
||||||
import chronos, chronicles, stew/byteutils
|
import chronos, chronicles, stew/byteutils
|
||||||
import stream/connection,
|
import stream/connection,
|
||||||
protocols/protocol
|
protocols/protocol
|
||||||
@ -21,7 +21,7 @@ logScope:
|
|||||||
topics = "libp2p multistream"
|
topics = "libp2p multistream"
|
||||||
|
|
||||||
const
|
const
|
||||||
MsgSize* = 64*1024
|
MsgSize* = 1024
|
||||||
Codec* = "/multistream/1.0.0"
|
Codec* = "/multistream/1.0.0"
|
||||||
|
|
||||||
MSCodec* = "\x13" & Codec & "\n"
|
MSCodec* = "\x13" & Codec & "\n"
|
||||||
@ -33,17 +33,20 @@ type
|
|||||||
|
|
||||||
MultiStreamError* = object of LPError
|
MultiStreamError* = object of LPError
|
||||||
|
|
||||||
HandlerHolder* = object
|
HandlerHolder* = ref object
|
||||||
protos*: seq[string]
|
protos*: seq[string]
|
||||||
protocol*: LPProtocol
|
protocol*: LPProtocol
|
||||||
match*: Matcher
|
match*: Matcher
|
||||||
|
openedStreams: CountTable[PeerId]
|
||||||
|
|
||||||
MultistreamSelect* = ref object of RootObj
|
MultistreamSelect* = ref object of RootObj
|
||||||
handlers*: seq[HandlerHolder]
|
handlers*: seq[HandlerHolder]
|
||||||
codec*: string
|
codec*: string
|
||||||
|
|
||||||
proc new*(T: typedesc[MultistreamSelect]): T =
|
proc new*(T: typedesc[MultistreamSelect]): T =
|
||||||
T(codec: MSCodec)
|
T(
|
||||||
|
codec: MSCodec,
|
||||||
|
)
|
||||||
|
|
||||||
template validateSuffix(str: string): untyped =
|
template validateSuffix(str: string): untyped =
|
||||||
if str.endsWith("\n"):
|
if str.endsWith("\n"):
|
||||||
@ -169,9 +172,22 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
|
|||||||
for h in m.handlers:
|
for h in m.handlers:
|
||||||
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
|
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
|
||||||
trace "found handler", conn, protocol = ms
|
trace "found handler", conn, protocol = ms
|
||||||
await conn.writeLp(ms & "\n")
|
|
||||||
conn.protocol = ms
|
var protocolHolder = h
|
||||||
await h.protocol.handler(conn, ms)
|
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
|
||||||
|
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
|
||||||
|
debug "Max streams for protocol reached, blocking new stream",
|
||||||
|
conn, protocol = ms, maxIncomingStreams
|
||||||
|
return
|
||||||
|
protocolHolder.openedStreams.inc(conn.peerId)
|
||||||
|
try:
|
||||||
|
await conn.writeLp(ms & "\n")
|
||||||
|
conn.protocol = ms
|
||||||
|
await protocolHolder.protocol.handler(conn, ms)
|
||||||
|
finally:
|
||||||
|
protocolHolder.openedStreams.inc(conn.peerId, -1)
|
||||||
|
if protocolHolder.openedStreams[conn.peerId] == 0:
|
||||||
|
protocolHolder.openedStreams.del(conn.peerId)
|
||||||
return
|
return
|
||||||
debug "no handlers", conn, protocol = ms
|
debug "no handlers", conn, protocol = ms
|
||||||
await conn.write(Na)
|
await conn.write(Na)
|
||||||
|
@ -58,6 +58,8 @@ type
|
|||||||
initiator*: bool # initiated remotely or locally flag
|
initiator*: bool # initiated remotely or locally flag
|
||||||
isOpen*: bool # has channel been opened
|
isOpen*: bool # has channel been opened
|
||||||
closedLocal*: bool # has channel been closed locally
|
closedLocal*: bool # has channel been closed locally
|
||||||
|
remoteReset*: bool # has channel been remotely reset
|
||||||
|
localReset*: bool # has channel been reset locally
|
||||||
msgCode*: MessageType # cached in/out message code
|
msgCode*: MessageType # cached in/out message code
|
||||||
closeCode*: MessageType # cached in/out close code
|
closeCode*: MessageType # cached in/out close code
|
||||||
resetCode*: MessageType # cached in/out reset code
|
resetCode*: MessageType # cached in/out reset code
|
||||||
@ -103,6 +105,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
|
|||||||
|
|
||||||
s.isClosed = true
|
s.isClosed = true
|
||||||
s.closedLocal = true
|
s.closedLocal = true
|
||||||
|
s.localReset = not s.remoteReset
|
||||||
|
|
||||||
trace "Resetting channel", s, len = s.len
|
trace "Resetting channel", s, len = s.len
|
||||||
|
|
||||||
@ -168,6 +171,14 @@ method readOnce*(s: LPChannel,
|
|||||||
## channels are blocked - in particular, this means that reading from one
|
## channels are blocked - in particular, this means that reading from one
|
||||||
## channel must not be done from within a callback / read handler of another
|
## channel must not be done from within a callback / read handler of another
|
||||||
## or the reads will lock each other.
|
## or the reads will lock each other.
|
||||||
|
if s.remoteReset:
|
||||||
|
raise newLPStreamResetError()
|
||||||
|
if s.localReset:
|
||||||
|
raise newLPStreamClosedError()
|
||||||
|
if s.atEof():
|
||||||
|
raise newLPStreamRemoteClosedError()
|
||||||
|
if s.conn.closed:
|
||||||
|
raise newLPStreamConnDownError()
|
||||||
try:
|
try:
|
||||||
let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
|
let bytes = await procCall BufferStream(s).readOnce(pbytes, nbytes)
|
||||||
when defined(libp2p_network_protocols_metrics):
|
when defined(libp2p_network_protocols_metrics):
|
||||||
@ -184,13 +195,17 @@ method readOnce*(s: LPChannel,
|
|||||||
# data has been lost in s.readBuf and there's no way to gracefully recover /
|
# data has been lost in s.readBuf and there's no way to gracefully recover /
|
||||||
# use the channel any more
|
# use the channel any more
|
||||||
await s.reset()
|
await s.reset()
|
||||||
raise exc
|
raise newLPStreamConnDownError(exc)
|
||||||
|
|
||||||
proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
|
proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
|
||||||
# prepareWrite is the slow path of writing a message - see conditions in
|
# prepareWrite is the slow path of writing a message - see conditions in
|
||||||
# write
|
# write
|
||||||
if s.closedLocal or s.conn.closed:
|
if s.remoteReset:
|
||||||
|
raise newLPStreamResetError()
|
||||||
|
if s.closedLocal:
|
||||||
raise newLPStreamClosedError()
|
raise newLPStreamClosedError()
|
||||||
|
if s.conn.closed:
|
||||||
|
raise newLPStreamConnDownError()
|
||||||
|
|
||||||
if msg.len == 0:
|
if msg.len == 0:
|
||||||
return
|
return
|
||||||
@ -235,7 +250,7 @@ proc completeWrite(
|
|||||||
trace "exception in lpchannel write handler", s, msg = exc.msg
|
trace "exception in lpchannel write handler", s, msg = exc.msg
|
||||||
await s.reset()
|
await s.reset()
|
||||||
await s.conn.close()
|
await s.conn.close()
|
||||||
raise exc
|
raise newLPStreamConnDownError(exc)
|
||||||
finally:
|
finally:
|
||||||
s.writes -= 1
|
s.writes -= 1
|
||||||
|
|
||||||
|
@ -183,6 +183,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
|
|||||||
of MessageType.CloseIn, MessageType.CloseOut:
|
of MessageType.CloseIn, MessageType.CloseOut:
|
||||||
await channel.pushEof()
|
await channel.pushEof()
|
||||||
of MessageType.ResetIn, MessageType.ResetOut:
|
of MessageType.ResetIn, MessageType.ResetOut:
|
||||||
|
channel.remoteReset = true
|
||||||
await channel.reset()
|
await channel.reset()
|
||||||
except CancelledError:
|
except CancelledError:
|
||||||
debug "Unexpected cancellation in mplex handler", m
|
debug "Unexpected cancellation in mplex handler", m
|
||||||
|
@ -153,6 +153,7 @@ type
|
|||||||
sendQueue: seq[ToSend]
|
sendQueue: seq[ToSend]
|
||||||
recvQueue: seq[byte]
|
recvQueue: seq[byte]
|
||||||
isReset: bool
|
isReset: bool
|
||||||
|
remoteReset: bool
|
||||||
closedRemotely: Future[void]
|
closedRemotely: Future[void]
|
||||||
closedLocally: bool
|
closedLocally: bool
|
||||||
receivedData: AsyncEvent
|
receivedData: AsyncEvent
|
||||||
@ -194,23 +195,25 @@ method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
|
|||||||
await channel.actuallyClose()
|
await channel.actuallyClose()
|
||||||
|
|
||||||
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
|
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
|
||||||
if not channel.isReset:
|
if channel.isReset:
|
||||||
trace "Reset channel"
|
return
|
||||||
channel.isReset = true
|
trace "Reset channel"
|
||||||
for (d, s, fut) in channel.sendQueue:
|
channel.isReset = true
|
||||||
fut.fail(newLPStreamEOFError())
|
channel.remoteReset = not isLocal
|
||||||
channel.sendQueue = @[]
|
for (d, s, fut) in channel.sendQueue:
|
||||||
channel.recvQueue = @[]
|
fut.fail(newLPStreamEOFError())
|
||||||
channel.sendWindow = 0
|
channel.sendQueue = @[]
|
||||||
if not channel.closedLocally:
|
channel.recvQueue = @[]
|
||||||
if isLocal:
|
channel.sendWindow = 0
|
||||||
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
|
if not channel.closedLocally:
|
||||||
except LPStreamEOFError as exc: discard
|
if isLocal:
|
||||||
except LPStreamClosedError as exc: discard
|
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
|
||||||
await channel.close()
|
except LPStreamEOFError as exc: discard
|
||||||
if not channel.closedRemotely.done():
|
except LPStreamClosedError as exc: discard
|
||||||
await channel.remoteClosed()
|
await channel.close()
|
||||||
channel.receivedData.fire()
|
if not channel.closedRemotely.done():
|
||||||
|
await channel.remoteClosed()
|
||||||
|
channel.receivedData.fire()
|
||||||
if not isLocal:
|
if not isLocal:
|
||||||
# If we reset locally, we want to flush up to a maximum of recvWindow
|
# If we reset locally, we want to flush up to a maximum of recvWindow
|
||||||
# bytes. We use the recvWindow in the proc cleanupChann.
|
# bytes. We use the recvWindow in the proc cleanupChann.
|
||||||
@ -235,7 +238,15 @@ method readOnce*(
|
|||||||
nbytes: int):
|
nbytes: int):
|
||||||
Future[int] {.async.} =
|
Future[int] {.async.} =
|
||||||
|
|
||||||
if channel.returnedEof: raise newLPStreamEOFError()
|
if channel.isReset:
|
||||||
|
raise if channel.remoteReset:
|
||||||
|
newLPStreamResetError()
|
||||||
|
elif channel.closedLocally:
|
||||||
|
newLPStreamClosedError()
|
||||||
|
else:
|
||||||
|
newLPStreamConnDownError()
|
||||||
|
if channel.returnedEof:
|
||||||
|
raise newLPStreamRemoteClosedError()
|
||||||
if channel.recvQueue.len == 0:
|
if channel.recvQueue.len == 0:
|
||||||
channel.receivedData.clear()
|
channel.receivedData.clear()
|
||||||
await channel.closedRemotely or channel.receivedData.wait()
|
await channel.closedRemotely or channel.receivedData.wait()
|
||||||
@ -313,8 +324,9 @@ proc trySend(channel: YamuxChannel) {.async.} =
|
|||||||
channel.sendWindow.dec(toSend)
|
channel.sendWindow.dec(toSend)
|
||||||
try: await channel.conn.write(sendBuffer)
|
try: await channel.conn.write(sendBuffer)
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
|
let connDown = newLPStreamConnDownError(exc)
|
||||||
for fut in futures.items():
|
for fut in futures.items():
|
||||||
fut.fail(exc)
|
fut.fail(connDown)
|
||||||
await channel.reset()
|
await channel.reset()
|
||||||
break
|
break
|
||||||
for fut in futures.items():
|
for fut in futures.items():
|
||||||
@ -323,8 +335,11 @@ proc trySend(channel: YamuxChannel) {.async.} =
|
|||||||
|
|
||||||
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
|
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
|
||||||
result = newFuture[void]("Yamux Send")
|
result = newFuture[void]("Yamux Send")
|
||||||
|
if channel.remoteReset:
|
||||||
|
result.fail(newLPStreamResetError())
|
||||||
|
return result
|
||||||
if channel.closedLocally or channel.isReset:
|
if channel.closedLocally or channel.isReset:
|
||||||
result.fail(newLPStreamEOFError())
|
result.fail(newLPStreamClosedError())
|
||||||
return result
|
return result
|
||||||
if msg.len == 0:
|
if msg.len == 0:
|
||||||
result.complete()
|
result.complete()
|
||||||
@ -396,9 +411,11 @@ method close*(m: Yamux) {.async.} =
|
|||||||
m.isClosed = true
|
m.isClosed = true
|
||||||
|
|
||||||
trace "Closing yamux"
|
trace "Closing yamux"
|
||||||
for channel in m.channels.values:
|
let channels = toSeq(m.channels.values())
|
||||||
await channel.reset()
|
for channel in channels:
|
||||||
await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
await channel.reset(true)
|
||||||
|
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
||||||
|
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
|
||||||
await m.connection.close()
|
await m.connection.close()
|
||||||
trace "Closed yamux"
|
trace "Closed yamux"
|
||||||
|
|
||||||
@ -453,8 +470,9 @@ method handle*(m: Yamux) {.async, gcsafe.} =
|
|||||||
m.flushed[header.streamId].dec(int(header.length))
|
m.flushed[header.streamId].dec(int(header.length))
|
||||||
if m.flushed[header.streamId] < 0:
|
if m.flushed[header.streamId] < 0:
|
||||||
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
|
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
|
||||||
var buffer = newSeqUninitialized[byte](header.length)
|
if header.length > 0:
|
||||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
var buffer = newSeqUninitialized[byte](header.length)
|
||||||
|
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||||
continue
|
continue
|
||||||
|
|
||||||
let channel = m.channels[header.streamId]
|
let channel = m.channels[header.streamId]
|
||||||
|
@ -14,7 +14,7 @@ else:
|
|||||||
|
|
||||||
import
|
import
|
||||||
std/[streams, strutils, sets, sequtils],
|
std/[streams, strutils, sets, sequtils],
|
||||||
chronos, chronicles,
|
chronos, chronicles, stew/byteutils,
|
||||||
dnsclientpkg/[protocol, types]
|
dnsclientpkg/[protocol, types]
|
||||||
|
|
||||||
import
|
import
|
||||||
@ -76,15 +76,11 @@ proc getDnsResponse(
|
|||||||
if not receivedDataFuture.finished:
|
if not receivedDataFuture.finished:
|
||||||
raise newException(IOError, "DNS server timeout")
|
raise newException(IOError, "DNS server timeout")
|
||||||
|
|
||||||
var
|
let rawResponse = sock.getMessage()
|
||||||
rawResponse = sock.getMessage()
|
|
||||||
dataStream = newStringStream()
|
|
||||||
dataStream.writeData(addr rawResponse[0], rawResponse.len)
|
|
||||||
dataStream.setPosition(0)
|
|
||||||
# parseResponse can has a raises: [Exception, ..] because of
|
# parseResponse can has a raises: [Exception, ..] because of
|
||||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||||
# it can't actually raise though
|
# it can't actually raise though
|
||||||
return parseResponse(dataStream)
|
return parseResponse(string.fromBytes(rawResponse))
|
||||||
except CatchableError as exc: raise exc
|
except CatchableError as exc: raise exc
|
||||||
except Exception as exc: raiseAssert exc.msg
|
except Exception as exc: raiseAssert exc.msg
|
||||||
finally:
|
finally:
|
||||||
@ -118,7 +114,14 @@ method resolveIp*(
|
|||||||
try:
|
try:
|
||||||
let resp = await fut
|
let resp = await fut
|
||||||
for answer in resp.answers:
|
for answer in resp.answers:
|
||||||
resolvedAddresses.incl(answer.toString())
|
# toString can has a raises: [Exception, ..] because of
|
||||||
|
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||||
|
# it can't actually raise though
|
||||||
|
resolvedAddresses.incl(
|
||||||
|
try: answer.toString()
|
||||||
|
except CatchableError as exc: raise exc
|
||||||
|
except Exception as exc: raiseAssert exc.msg
|
||||||
|
)
|
||||||
except CancelledError as e:
|
except CancelledError as e:
|
||||||
raise e
|
raise e
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
@ -158,6 +161,11 @@ method resolveTxt*(
|
|||||||
self.nameServers.add(self.nameServers[0])
|
self.nameServers.add(self.nameServers[0])
|
||||||
self.nameServers.delete(0)
|
self.nameServers.delete(0)
|
||||||
continue
|
continue
|
||||||
|
except Exception as e:
|
||||||
|
# toString can has a raises: [Exception, ..] because of
|
||||||
|
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||||
|
# it can't actually raise though
|
||||||
|
raiseAssert e.msg
|
||||||
|
|
||||||
debug "Failed to resolve TXT, returning empty set"
|
debug "Failed to resolve TXT, returning empty set"
|
||||||
return @[]
|
return @[]
|
||||||
|
@ -44,11 +44,13 @@ method resolveIp*(
|
|||||||
doAssert(false, "Not implemented!")
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
proc getHostname*(ma: MultiAddress): string =
|
proc getHostname*(ma: MultiAddress): string =
|
||||||
let firstPart = ($ma[0].get()).split('/')
|
let
|
||||||
if firstPart.len > 1: firstPart[2]
|
firstPart = ma[0].valueOr: return ""
|
||||||
|
fpSplitted = ($firstPart).split('/', 2)
|
||||||
|
if fpSplitted.len > 2: fpSplitted[2]
|
||||||
else: ""
|
else: ""
|
||||||
|
|
||||||
proc resolveDnsAddress(
|
proc resolveOneAddress(
|
||||||
self: NameResolver,
|
self: NameResolver,
|
||||||
ma: MultiAddress,
|
ma: MultiAddress,
|
||||||
domain: Domain = Domain.AF_UNSPEC,
|
domain: Domain = Domain.AF_UNSPEC,
|
||||||
@ -69,24 +71,17 @@ proc resolveDnsAddress(
|
|||||||
for address in resolvedAddresses:
|
for address in resolvedAddresses:
|
||||||
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
|
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
|
||||||
for part in ma:
|
for part in ma:
|
||||||
if DNS.match(part.get()): continue
|
if DNS.match(part.tryGet()): continue
|
||||||
createdAddress &= part.tryGet()
|
createdAddress &= part.tryGet()
|
||||||
createdAddress
|
createdAddress
|
||||||
|
|
||||||
func matchDnsSuffix(m1, m2: MultiAddress): MaResult[bool] =
|
proc resolveDnsAddr*(
|
||||||
for partMaybe in m1:
|
|
||||||
let part = ?partMaybe
|
|
||||||
if DNS.match(part): continue
|
|
||||||
let entryProt = ?m2[?part.protoCode()]
|
|
||||||
if entryProt != part:
|
|
||||||
return ok(false)
|
|
||||||
return ok(true)
|
|
||||||
|
|
||||||
proc resolveDnsAddr(
|
|
||||||
self: NameResolver,
|
self: NameResolver,
|
||||||
ma: MultiAddress,
|
ma: MultiAddress,
|
||||||
depth: int = 0): Future[seq[MultiAddress]]
|
depth: int = 0): Future[seq[MultiAddress]] {.async.} =
|
||||||
{.async.} =
|
|
||||||
|
if not DNSADDR.matchPartial(ma):
|
||||||
|
return @[ma]
|
||||||
|
|
||||||
trace "Resolving dnsaddr", ma
|
trace "Resolving dnsaddr", ma
|
||||||
if depth > 6:
|
if depth > 6:
|
||||||
@ -104,21 +99,17 @@ proc resolveDnsAddr(
|
|||||||
if not entry.startsWith("dnsaddr="): continue
|
if not entry.startsWith("dnsaddr="): continue
|
||||||
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
|
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
|
||||||
|
|
||||||
if not matchDnsSuffix(ma, entryValue).tryGet(): continue
|
if entryValue.contains(multiCodec("p2p")).tryGet() and ma.contains(multiCodec("p2p")).tryGet():
|
||||||
|
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
|
||||||
|
continue
|
||||||
|
|
||||||
# The spec is not clear wheter only DNSADDR can be recursived
|
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
|
||||||
# or any DNS addr. Only handling DNSADDR because it's simpler
|
for r in resolved:
|
||||||
# to avoid infinite recursion
|
result.add(r)
|
||||||
if DNSADDR.matchPartial(entryValue):
|
|
||||||
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
|
|
||||||
for r in resolved:
|
|
||||||
result.add(r)
|
|
||||||
else:
|
|
||||||
result.add(entryValue)
|
|
||||||
|
|
||||||
if result.len == 0:
|
if result.len == 0:
|
||||||
debug "Failed to resolve any DNSADDR", ma
|
debug "Failed to resolve a DNSADDR", ma
|
||||||
return @[ma]
|
return @[]
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
@ -133,14 +124,15 @@ proc resolveMAddress*(
|
|||||||
let code = address[0].get().protoCode().get()
|
let code = address[0].get().protoCode().get()
|
||||||
let seq = case code:
|
let seq = case code:
|
||||||
of multiCodec("dns"):
|
of multiCodec("dns"):
|
||||||
await self.resolveDnsAddress(address)
|
await self.resolveOneAddress(address)
|
||||||
of multiCodec("dns4"):
|
of multiCodec("dns4"):
|
||||||
await self.resolveDnsAddress(address, Domain.AF_INET)
|
await self.resolveOneAddress(address, Domain.AF_INET)
|
||||||
of multiCodec("dns6"):
|
of multiCodec("dns6"):
|
||||||
await self.resolveDnsAddress(address, Domain.AF_INET6)
|
await self.resolveOneAddress(address, Domain.AF_INET6)
|
||||||
of multiCodec("dnsaddr"):
|
of multiCodec("dnsaddr"):
|
||||||
await self.resolveDnsAddr(address)
|
await self.resolveDnsAddr(address)
|
||||||
else:
|
else:
|
||||||
|
doAssert false
|
||||||
@[address]
|
@[address]
|
||||||
for ad in seq:
|
for ad in seq:
|
||||||
res.incl(ad)
|
res.incl(ad)
|
||||||
|
@ -148,7 +148,7 @@ func init*(pid: var PeerId, data: string): bool =
|
|||||||
if Base58.decode(data, p, length) == Base58Status.Success:
|
if Base58.decode(data, p, length) == Base58Status.Success:
|
||||||
p.setLen(length)
|
p.setLen(length)
|
||||||
var opid: PeerId
|
var opid: PeerId
|
||||||
shallowCopy(opid.data, p)
|
opid.data = p
|
||||||
if opid.validate():
|
if opid.validate():
|
||||||
pid = opid
|
pid = opid
|
||||||
result = true
|
result = true
|
||||||
@ -184,6 +184,11 @@ func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
|
|||||||
## Create new peer id from private key ``seckey``.
|
## Create new peer id from private key ``seckey``.
|
||||||
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
|
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
|
||||||
|
|
||||||
|
proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
|
||||||
|
## Create new peer id with random public key.
|
||||||
|
let randomKey = PrivateKey.random(Secp256k1, rng[])[]
|
||||||
|
PeerId.init(randomKey).orError(cstring("failed to generate random key"))
|
||||||
|
|
||||||
func match*(pid: PeerId, pubkey: PublicKey): bool =
|
func match*(pid: PeerId, pubkey: PublicKey): bool =
|
||||||
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
||||||
let p = PeerId.init(pubkey)
|
let p = PeerId.init(pubkey)
|
||||||
|
@ -15,18 +15,24 @@ else:
|
|||||||
|
|
||||||
import std/[options, sequtils]
|
import std/[options, sequtils]
|
||||||
import pkg/[chronos, chronicles, stew/results]
|
import pkg/[chronos, chronicles, stew/results]
|
||||||
import peerid, multiaddress, crypto/crypto, routing_record, errors, utility
|
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
|
||||||
|
|
||||||
export peerid, multiaddress, crypto, routing_record, errors, results
|
export peerid, multiaddress, crypto, routing_record, errors, results
|
||||||
|
|
||||||
## Our local peer info
|
## Our local peer info
|
||||||
|
|
||||||
type
|
type
|
||||||
PeerInfoError* = LPError
|
PeerInfoError* = object of LPError
|
||||||
|
|
||||||
|
AddressMapper* =
|
||||||
|
proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
|
||||||
|
{.gcsafe, raises: [Defect].}
|
||||||
|
|
||||||
PeerInfo* {.public.} = ref object
|
PeerInfo* {.public.} = ref object
|
||||||
peerId*: PeerId
|
peerId*: PeerId
|
||||||
addrs*: seq[MultiAddress]
|
listenAddrs*: seq[MultiAddress]
|
||||||
|
addrs: seq[MultiAddress]
|
||||||
|
addressMappers*: seq[AddressMapper]
|
||||||
protocols*: seq[string]
|
protocols*: seq[string]
|
||||||
protoVersion*: string
|
protoVersion*: string
|
||||||
agentVersion*: string
|
agentVersion*: string
|
||||||
@ -37,6 +43,7 @@ type
|
|||||||
func shortLog*(p: PeerInfo): auto =
|
func shortLog*(p: PeerInfo): auto =
|
||||||
(
|
(
|
||||||
peerId: $p.peerId,
|
peerId: $p.peerId,
|
||||||
|
listenAddrs: mapIt(p.listenAddrs, $it),
|
||||||
addrs: mapIt(p.addrs, $it),
|
addrs: mapIt(p.addrs, $it),
|
||||||
protocols: mapIt(p.protocols, $it),
|
protocols: mapIt(p.protocols, $it),
|
||||||
protoVersion: p.protoVersion,
|
protoVersion: p.protoVersion,
|
||||||
@ -44,7 +51,11 @@ func shortLog*(p: PeerInfo): auto =
|
|||||||
)
|
)
|
||||||
chronicles.formatIt(PeerInfo): shortLog(it)
|
chronicles.formatIt(PeerInfo): shortLog(it)
|
||||||
|
|
||||||
proc update*(p: PeerInfo) =
|
proc update*(p: PeerInfo) {.async.} =
|
||||||
|
p.addrs = p.listenAddrs
|
||||||
|
for mapper in p.addressMappers:
|
||||||
|
p.addrs = await mapper(p.addrs)
|
||||||
|
|
||||||
let sprRes = SignedPeerRecord.init(
|
let sprRes = SignedPeerRecord.init(
|
||||||
p.privateKey,
|
p.privateKey,
|
||||||
PeerRecord.init(p.peerId, p.addrs)
|
PeerRecord.init(p.peerId, p.addrs)
|
||||||
@ -55,14 +66,40 @@ proc update*(p: PeerInfo) =
|
|||||||
discard
|
discard
|
||||||
#info "Can't update the signed peer record"
|
#info "Can't update the signed peer record"
|
||||||
|
|
||||||
|
proc addrs*(p: PeerInfo): seq[MultiAddress] =
|
||||||
|
p.addrs
|
||||||
|
|
||||||
|
proc fullAddrs*(p: PeerInfo): MaResult[seq[MultiAddress]] =
|
||||||
|
let peerIdPart = ? MultiAddress.init(multiCodec("p2p"), p.peerId.data)
|
||||||
|
var res: seq[MultiAddress]
|
||||||
|
for address in p.addrs:
|
||||||
|
res.add(? concat(address, peerIdPart))
|
||||||
|
ok(res)
|
||||||
|
|
||||||
|
proc parseFullAddress*(ma: MultiAddress): MaResult[(PeerId, MultiAddress)] =
|
||||||
|
let p2pPart = ? ma[^1]
|
||||||
|
if ? p2pPart.protoCode != multiCodec("p2p"):
|
||||||
|
return err("Missing p2p part from multiaddress!")
|
||||||
|
|
||||||
|
let res = (
|
||||||
|
? PeerId.init(? p2pPart.protoArgument()).orErr("invalid peerid"),
|
||||||
|
? ma[0 .. ^2]
|
||||||
|
)
|
||||||
|
ok(res)
|
||||||
|
|
||||||
|
proc parseFullAddress*(ma: string | seq[byte]): MaResult[(PeerId, MultiAddress)] =
|
||||||
|
parseFullAddress(? MultiAddress.init(ma))
|
||||||
|
|
||||||
proc new*(
|
proc new*(
|
||||||
p: typedesc[PeerInfo],
|
p: typedesc[PeerInfo],
|
||||||
key: PrivateKey,
|
key: PrivateKey,
|
||||||
addrs: openArray[MultiAddress] = [],
|
listenAddrs: openArray[MultiAddress] = [],
|
||||||
protocols: openArray[string] = [],
|
protocols: openArray[string] = [],
|
||||||
protoVersion: string = "",
|
protoVersion: string = "",
|
||||||
agentVersion: string = ""): PeerInfo
|
agentVersion: string = "",
|
||||||
{.raises: [Defect, PeerInfoError].} =
|
addressMappers = newSeq[AddressMapper](),
|
||||||
|
): PeerInfo
|
||||||
|
{.raises: [Defect, LPError].} =
|
||||||
|
|
||||||
let pubkey = try:
|
let pubkey = try:
|
||||||
key.getPublicKey().tryGet()
|
key.getPublicKey().tryGet()
|
||||||
@ -77,10 +114,9 @@ proc new*(
|
|||||||
privateKey: key,
|
privateKey: key,
|
||||||
protoVersion: protoVersion,
|
protoVersion: protoVersion,
|
||||||
agentVersion: agentVersion,
|
agentVersion: agentVersion,
|
||||||
addrs: @addrs,
|
listenAddrs: @listenAddrs,
|
||||||
protocols: @protocols,
|
protocols: @protocols,
|
||||||
|
addressMappers: addressMappers
|
||||||
)
|
)
|
||||||
|
|
||||||
peerInfo.update()
|
|
||||||
|
|
||||||
return peerInfo
|
return peerInfo
|
||||||
|
@ -16,8 +16,7 @@ runnableExamples:
|
|||||||
# Create a custom book type
|
# Create a custom book type
|
||||||
type MoodBook = ref object of PeerBook[string]
|
type MoodBook = ref object of PeerBook[string]
|
||||||
|
|
||||||
var somePeerId: PeerId
|
var somePeerId = PeerId.random().get()
|
||||||
discard somePeerId.init("")
|
|
||||||
|
|
||||||
peerStore[MoodBook][somePeerId] = "Happy"
|
peerStore[MoodBook][somePeerId] = "Happy"
|
||||||
doAssert peerStore[MoodBook][somePeerId] == "Happy"
|
doAssert peerStore[MoodBook][somePeerId] == "Happy"
|
||||||
@ -153,6 +152,9 @@ proc updatePeerInfo*(
|
|||||||
if info.addrs.len > 0:
|
if info.addrs.len > 0:
|
||||||
peerStore[AddressBook][info.peerId] = info.addrs
|
peerStore[AddressBook][info.peerId] = info.addrs
|
||||||
|
|
||||||
|
if info.pubkey.isSome:
|
||||||
|
peerStore[KeyBook][info.peerId] = info.pubkey.get()
|
||||||
|
|
||||||
if info.agentVersion.isSome:
|
if info.agentVersion.isSome:
|
||||||
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
|
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
|
||||||
|
|
||||||
|
@ -19,8 +19,7 @@ export results, utility
|
|||||||
|
|
||||||
{.push public.}
|
{.push public.}
|
||||||
|
|
||||||
const
|
const MaxMessageSize = 1'u shl 22
|
||||||
MaxMessageSize* = 1'u shl 22
|
|
||||||
|
|
||||||
type
|
type
|
||||||
ProtoFieldKind* = enum
|
ProtoFieldKind* = enum
|
||||||
@ -37,6 +36,7 @@ type
|
|||||||
buffer*: seq[byte]
|
buffer*: seq[byte]
|
||||||
offset*: int
|
offset*: int
|
||||||
length*: int
|
length*: int
|
||||||
|
maxSize*: uint
|
||||||
|
|
||||||
ProtoHeader* = object
|
ProtoHeader* = object
|
||||||
wire*: ProtoFieldKind
|
wire*: ProtoFieldKind
|
||||||
@ -122,23 +122,28 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
|
|||||||
0
|
0
|
||||||
|
|
||||||
proc initProtoBuffer*(data: seq[byte], offset = 0,
|
proc initProtoBuffer*(data: seq[byte], offset = 0,
|
||||||
options: set[ProtoFlags] = {}): ProtoBuffer =
|
options: set[ProtoFlags] = {},
|
||||||
|
maxSize = MaxMessageSize): ProtoBuffer =
|
||||||
## Initialize ProtoBuffer with shallow copy of ``data``.
|
## Initialize ProtoBuffer with shallow copy of ``data``.
|
||||||
shallowCopy(result.buffer, data)
|
result.buffer = data
|
||||||
result.offset = offset
|
result.offset = offset
|
||||||
result.options = options
|
result.options = options
|
||||||
|
result.maxSize = maxSize
|
||||||
|
|
||||||
proc initProtoBuffer*(data: openArray[byte], offset = 0,
|
proc initProtoBuffer*(data: openArray[byte], offset = 0,
|
||||||
options: set[ProtoFlags] = {}): ProtoBuffer =
|
options: set[ProtoFlags] = {},
|
||||||
|
maxSize = MaxMessageSize): ProtoBuffer =
|
||||||
## Initialize ProtoBuffer with copy of ``data``.
|
## Initialize ProtoBuffer with copy of ``data``.
|
||||||
result.buffer = @data
|
result.buffer = @data
|
||||||
result.offset = offset
|
result.offset = offset
|
||||||
result.options = options
|
result.options = options
|
||||||
|
result.maxSize = maxSize
|
||||||
|
|
||||||
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
|
proc initProtoBuffer*(options: set[ProtoFlags] = {}, maxSize = MaxMessageSize): ProtoBuffer =
|
||||||
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
|
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
|
||||||
result.buffer = newSeq[byte]()
|
result.buffer = newSeq[byte]()
|
||||||
result.options = options
|
result.options = options
|
||||||
|
result.maxSize = maxSize
|
||||||
if WithVarintLength in options:
|
if WithVarintLength in options:
|
||||||
# Our buffer will start from position 10, so we can store length of buffer
|
# Our buffer will start from position 10, so we can store length of buffer
|
||||||
# in [0, 9].
|
# in [0, 9].
|
||||||
@ -335,7 +340,7 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
|
|||||||
var bsize = 0'u64
|
var bsize = 0'u64
|
||||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||||
data.offset += length
|
data.offset += length
|
||||||
if bsize <= uint64(MaxMessageSize):
|
if bsize <= uint64(data.maxSize):
|
||||||
if data.isEnough(int(bsize)):
|
if data.isEnough(int(bsize)):
|
||||||
data.offset += int(bsize)
|
data.offset += int(bsize)
|
||||||
ok()
|
ok()
|
||||||
@ -399,7 +404,7 @@ proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
|
|||||||
outLength = 0
|
outLength = 0
|
||||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||||
data.offset += length
|
data.offset += length
|
||||||
if bsize <= uint64(MaxMessageSize):
|
if bsize <= uint64(data.maxSize):
|
||||||
if data.isEnough(int(bsize)):
|
if data.isEnough(int(bsize)):
|
||||||
outLength = int(bsize)
|
outLength = int(bsize)
|
||||||
if len(outBytes) >= int(bsize):
|
if len(outBytes) >= int(bsize):
|
||||||
@ -427,7 +432,7 @@ proc getValue[T:seq[byte]|string](data: var ProtoBuffer, header: ProtoHeader,
|
|||||||
|
|
||||||
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
|
||||||
data.offset += length
|
data.offset += length
|
||||||
if bsize <= uint64(MaxMessageSize):
|
if bsize <= uint64(data.maxSize):
|
||||||
if data.isEnough(int(bsize)):
|
if data.isEnough(int(bsize)):
|
||||||
outBytes.setLen(bsize)
|
outBytes.setLen(bsize)
|
||||||
if bsize > 0'u64:
|
if bsize > 0'u64:
|
||||||
|
324
libp2p/protocols/connectivity/autonat.nim
Normal file
324
libp2p/protocols/connectivity/autonat.nim
Normal file
@ -0,0 +1,324 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import std/[options, sets, sequtils]
|
||||||
|
import stew/results
|
||||||
|
import chronos, chronicles, stew/objects
|
||||||
|
import ../protocol,
|
||||||
|
../../switch,
|
||||||
|
../../multiaddress,
|
||||||
|
../../multicodec,
|
||||||
|
../../peerid,
|
||||||
|
../../utils/semaphore,
|
||||||
|
../../errors
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "libp2p autonat"
|
||||||
|
|
||||||
|
const
|
||||||
|
AutonatCodec* = "/libp2p/autonat/1.0.0"
|
||||||
|
AddressLimit = 8
|
||||||
|
|
||||||
|
type
|
||||||
|
AutonatError* = object of LPError
|
||||||
|
AutonatUnreachableError* = object of LPError
|
||||||
|
|
||||||
|
MsgType* = enum
|
||||||
|
Dial = 0
|
||||||
|
DialResponse = 1
|
||||||
|
|
||||||
|
ResponseStatus* = enum
|
||||||
|
Ok = 0
|
||||||
|
DialError = 100
|
||||||
|
DialRefused = 101
|
||||||
|
BadRequest = 200
|
||||||
|
InternalError = 300
|
||||||
|
|
||||||
|
AutonatPeerInfo* = object
|
||||||
|
id*: Option[PeerId]
|
||||||
|
addrs*: seq[MultiAddress]
|
||||||
|
|
||||||
|
AutonatDial* = object
|
||||||
|
peerInfo*: Option[AutonatPeerInfo]
|
||||||
|
|
||||||
|
AutonatDialResponse* = object
|
||||||
|
status*: ResponseStatus
|
||||||
|
text*: Option[string]
|
||||||
|
ma*: Option[MultiAddress]
|
||||||
|
|
||||||
|
AutonatMsg* = object
|
||||||
|
msgType*: MsgType
|
||||||
|
dial*: Option[AutonatDial]
|
||||||
|
response*: Option[AutonatDialResponse]
|
||||||
|
|
||||||
|
proc encode*(msg: AutonatMsg): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, msg.msgType.uint)
|
||||||
|
if msg.dial.isSome():
|
||||||
|
var dial = initProtoBuffer()
|
||||||
|
if msg.dial.get().peerInfo.isSome():
|
||||||
|
var bufferPeerInfo = initProtoBuffer()
|
||||||
|
let peerInfo = msg.dial.get().peerInfo.get()
|
||||||
|
if peerInfo.id.isSome():
|
||||||
|
bufferPeerInfo.write(1, peerInfo.id.get())
|
||||||
|
for ma in peerInfo.addrs:
|
||||||
|
bufferPeerInfo.write(2, ma.data.buffer)
|
||||||
|
bufferPeerInfo.finish()
|
||||||
|
dial.write(1, bufferPeerInfo.buffer)
|
||||||
|
dial.finish()
|
||||||
|
result.write(2, dial.buffer)
|
||||||
|
if msg.response.isSome():
|
||||||
|
var bufferResponse = initProtoBuffer()
|
||||||
|
let response = msg.response.get()
|
||||||
|
bufferResponse.write(1, response.status.uint)
|
||||||
|
if response.text.isSome():
|
||||||
|
bufferResponse.write(2, response.text.get())
|
||||||
|
if response.ma.isSome():
|
||||||
|
bufferResponse.write(3, response.ma.get())
|
||||||
|
bufferResponse.finish()
|
||||||
|
result.write(3, bufferResponse.buffer)
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode*(d: AutonatDial): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, MsgType.Dial.uint)
|
||||||
|
var dial = initProtoBuffer()
|
||||||
|
if d.peerInfo.isSome():
|
||||||
|
var bufferPeerInfo = initProtoBuffer()
|
||||||
|
let peerInfo = d.peerInfo.get()
|
||||||
|
if peerInfo.id.isSome():
|
||||||
|
bufferPeerInfo.write(1, peerInfo.id.get())
|
||||||
|
for ma in peerInfo.addrs:
|
||||||
|
bufferPeerInfo.write(2, ma.data.buffer)
|
||||||
|
bufferPeerInfo.finish()
|
||||||
|
dial.write(1, bufferPeerInfo.buffer)
|
||||||
|
dial.finish()
|
||||||
|
result.write(2, dial.buffer)
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode*(r: AutonatDialResponse): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, MsgType.DialResponse.uint)
|
||||||
|
var bufferResponse = initProtoBuffer()
|
||||||
|
bufferResponse.write(1, r.status.uint)
|
||||||
|
if r.text.isSome():
|
||||||
|
bufferResponse.write(2, r.text.get())
|
||||||
|
if r.ma.isSome():
|
||||||
|
bufferResponse.write(3, r.ma.get())
|
||||||
|
bufferResponse.finish()
|
||||||
|
result.write(3, bufferResponse.buffer)
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
|
||||||
|
var
|
||||||
|
msgTypeOrd: uint32
|
||||||
|
pbDial: ProtoBuffer
|
||||||
|
pbResponse: ProtoBuffer
|
||||||
|
msg: AutonatMsg
|
||||||
|
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getField(1, msgTypeOrd)
|
||||||
|
r2 = pb.getField(2, pbDial)
|
||||||
|
r3 = pb.getField(3, pbResponse)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
|
||||||
|
|
||||||
|
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||||
|
return none(AutonatMsg)
|
||||||
|
if r2.get():
|
||||||
|
var
|
||||||
|
pbPeerInfo: ProtoBuffer
|
||||||
|
dial: AutonatDial
|
||||||
|
let
|
||||||
|
r4 = pbDial.getField(1, pbPeerInfo)
|
||||||
|
if r4.isErr(): return none(AutonatMsg)
|
||||||
|
|
||||||
|
var peerInfo: AutonatPeerInfo
|
||||||
|
if r4.get():
|
||||||
|
var pid: PeerId
|
||||||
|
let
|
||||||
|
r5 = pbPeerInfo.getField(1, pid)
|
||||||
|
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
|
||||||
|
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
|
||||||
|
if r5.get(): peerInfo.id = some(pid)
|
||||||
|
dial.peerInfo = some(peerInfo)
|
||||||
|
msg.dial = some(dial)
|
||||||
|
|
||||||
|
if r3.get():
|
||||||
|
var
|
||||||
|
statusOrd: uint
|
||||||
|
text: string
|
||||||
|
ma: MultiAddress
|
||||||
|
response: AutonatDialResponse
|
||||||
|
|
||||||
|
let
|
||||||
|
r4 = pbResponse.getField(1, statusOrd)
|
||||||
|
r5 = pbResponse.getField(2, text)
|
||||||
|
r6 = pbResponse.getField(3, ma)
|
||||||
|
|
||||||
|
if r4.isErr() or r5.isErr() or r6.isErr() or
|
||||||
|
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
|
||||||
|
return none(AutonatMsg)
|
||||||
|
if r5.get(): response.text = some(text)
|
||||||
|
if r6.get(): response.ma = some(ma)
|
||||||
|
msg.response = some(response)
|
||||||
|
|
||||||
|
return some(msg)
|
||||||
|
|
||||||
|
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||||
|
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||||
|
id: some(pid),
|
||||||
|
addrs: addrs
|
||||||
|
))).encode()
|
||||||
|
await conn.writeLp(pb.buffer)
|
||||||
|
|
||||||
|
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
|
||||||
|
let pb = AutonatDialResponse(
|
||||||
|
status: status,
|
||||||
|
text: if text == "": none(string) else: some(text),
|
||||||
|
ma: none(MultiAddress)
|
||||||
|
).encode()
|
||||||
|
await conn.writeLp(pb.buffer)
|
||||||
|
|
||||||
|
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
|
||||||
|
let pb = AutonatDialResponse(
|
||||||
|
status: ResponseStatus.Ok,
|
||||||
|
text: some("Ok"),
|
||||||
|
ma: some(ma)
|
||||||
|
).encode()
|
||||||
|
await conn.writeLp(pb.buffer)
|
||||||
|
|
||||||
|
type
|
||||||
|
Autonat* = ref object of LPProtocol
|
||||||
|
sem: AsyncSemaphore
|
||||||
|
switch*: Switch
|
||||||
|
dialTimeout: Duration
|
||||||
|
|
||||||
|
method dialMe*(a: Autonat, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
|
||||||
|
Future[MultiAddress] {.base, async.} =
|
||||||
|
|
||||||
|
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [UnpackError, AutonatError].} =
|
||||||
|
if autonatMsg.isNone() or
|
||||||
|
autonatMsg.get().msgType != DialResponse or
|
||||||
|
autonatMsg.get().response.isNone() or
|
||||||
|
(autonatMsg.get().response.get().status == Ok and
|
||||||
|
autonatMsg.get().response.get().ma.isNone()):
|
||||||
|
raise newException(AutonatError, "Unexpected response")
|
||||||
|
else:
|
||||||
|
autonatMsg.get().response.get()
|
||||||
|
|
||||||
|
let conn =
|
||||||
|
try:
|
||||||
|
if addrs.len == 0:
|
||||||
|
await a.switch.dial(pid, @[AutonatCodec])
|
||||||
|
else:
|
||||||
|
await a.switch.dial(pid, addrs, AutonatCodec)
|
||||||
|
except CatchableError as err:
|
||||||
|
raise newException(AutonatError, "Unexpected error when dialling", err)
|
||||||
|
|
||||||
|
defer: await conn.close()
|
||||||
|
await conn.sendDial(a.switch.peerInfo.peerId, a.switch.peerInfo.addrs)
|
||||||
|
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
|
||||||
|
return case response.status:
|
||||||
|
of ResponseStatus.Ok:
|
||||||
|
response.ma.get()
|
||||||
|
of ResponseStatus.DialError:
|
||||||
|
raise newException(AutonatUnreachableError, "Peer could not dial us back")
|
||||||
|
else:
|
||||||
|
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))
|
||||||
|
|
||||||
|
proc tryDial(a: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
|
||||||
|
try:
|
||||||
|
await a.sem.acquire()
|
||||||
|
let ma = await a.switch.dialer.tryDial(conn.peerId, addrs).wait(a.dialTimeout)
|
||||||
|
if ma.isSome:
|
||||||
|
await conn.sendResponseOk(ma.get())
|
||||||
|
else:
|
||||||
|
await conn.sendResponseError(DialError, "Missing observed address")
|
||||||
|
except CancelledError as exc:
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
await conn.sendResponseError(DialError, exc.msg)
|
||||||
|
finally:
|
||||||
|
a.sem.release()
|
||||||
|
|
||||||
|
proc handleDial(a: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
|
||||||
|
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
|
||||||
|
return conn.sendResponseError(BadRequest, "Missing Peer Info")
|
||||||
|
let peerInfo = msg.dial.get().peerInfo.get()
|
||||||
|
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
|
||||||
|
return conn.sendResponseError(BadRequest, "PeerId mismatch")
|
||||||
|
|
||||||
|
if conn.observedAddr.isNone:
|
||||||
|
return conn.sendResponseError(BadRequest, "Missing observed address")
|
||||||
|
let observedAddr = conn.observedAddr.get()
|
||||||
|
|
||||||
|
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
|
||||||
|
if isRelayed.isErr() or isRelayed.get():
|
||||||
|
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
|
||||||
|
let hostIp = observedAddr[0]
|
||||||
|
if hostIp.isErr() or not IP.match(hostIp.get()):
|
||||||
|
trace "wrong observed address", address=observedAddr
|
||||||
|
return conn.sendResponseError(InternalError, "Expected an IP address")
|
||||||
|
var addrs = initHashSet[MultiAddress]()
|
||||||
|
addrs.incl(observedAddr)
|
||||||
|
for ma in peerInfo.addrs:
|
||||||
|
isRelayed = ma.contains(multiCodec("p2p-circuit"))
|
||||||
|
if isRelayed.isErr() or isRelayed.get():
|
||||||
|
continue
|
||||||
|
let maFirst = ma[0]
|
||||||
|
if maFirst.isErr() or not IP.match(maFirst.get()):
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
addrs.incl(
|
||||||
|
if maFirst.get() == hostIp.get():
|
||||||
|
ma
|
||||||
|
else:
|
||||||
|
let maEnd = ma[1..^1]
|
||||||
|
if maEnd.isErr(): continue
|
||||||
|
hostIp.get() & maEnd.get()
|
||||||
|
)
|
||||||
|
except LPError as exc:
|
||||||
|
continue
|
||||||
|
if len(addrs) >= AddressLimit:
|
||||||
|
break
|
||||||
|
|
||||||
|
if len(addrs) == 0:
|
||||||
|
return conn.sendResponseError(DialRefused, "No dialable address")
|
||||||
|
return a.tryDial(conn, toSeq(addrs))
|
||||||
|
|
||||||
|
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
|
||||||
|
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
|
||||||
|
autonat.init()
|
||||||
|
autonat
|
||||||
|
|
||||||
|
method init*(a: Autonat) =
|
||||||
|
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||||
|
try:
|
||||||
|
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
|
||||||
|
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
|
||||||
|
raise newException(AutonatError, "Received malformed message")
|
||||||
|
let msg = msgOpt.get()
|
||||||
|
await a.handleDial(conn, msg)
|
||||||
|
except CancelledError as exc:
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
trace "exception in autonat handler", exc = exc.msg, conn
|
||||||
|
finally:
|
||||||
|
trace "exiting autonat handler", conn
|
||||||
|
await conn.close()
|
||||||
|
|
||||||
|
a.handler = handleStream
|
||||||
|
a.codec = AutonatCodec
|
@ -20,11 +20,12 @@ import ./relay,
|
|||||||
./messages,
|
./messages,
|
||||||
./rconn,
|
./rconn,
|
||||||
./utils,
|
./utils,
|
||||||
../../peerinfo,
|
../../../peerinfo,
|
||||||
../../switch,
|
../../../switch,
|
||||||
../../multiaddress,
|
../../../multiaddress,
|
||||||
../../stream/connection
|
../../../stream/connection
|
||||||
|
|
||||||
|
export options
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p relay relay-client"
|
topics = "libp2p relay relay-client"
|
@ -14,8 +14,8 @@ else:
|
|||||||
|
|
||||||
import options, macros, sequtils
|
import options, macros, sequtils
|
||||||
import stew/objects
|
import stew/objects
|
||||||
import ../../peerinfo,
|
import ../../../peerinfo,
|
||||||
../../signed_envelope
|
../../../signed_envelope
|
||||||
|
|
||||||
# Circuit Relay V1 Message
|
# Circuit Relay V1 Message
|
||||||
|
|
@ -14,7 +14,7 @@ else:
|
|||||||
|
|
||||||
import chronos
|
import chronos
|
||||||
|
|
||||||
import ../../stream/connection
|
import ../../../stream/connection
|
||||||
|
|
||||||
type
|
type
|
||||||
RelayConnection* = ref object of Connection
|
RelayConnection* = ref object of Connection
|
@ -19,16 +19,16 @@ import chronos, chronicles
|
|||||||
import ./messages,
|
import ./messages,
|
||||||
./rconn,
|
./rconn,
|
||||||
./utils,
|
./utils,
|
||||||
../../peerinfo,
|
../../../peerinfo,
|
||||||
../../switch,
|
../../../switch,
|
||||||
../../multiaddress,
|
../../../multiaddress,
|
||||||
../../multicodec,
|
../../../multicodec,
|
||||||
../../stream/connection,
|
../../../stream/connection,
|
||||||
../../protocols/protocol,
|
../../../protocols/protocol,
|
||||||
../../transports/transport,
|
../../../transports/transport,
|
||||||
../../errors,
|
../../../errors,
|
||||||
../../utils/heartbeat,
|
../../../utils/heartbeat,
|
||||||
../../signed_envelope
|
../../../signed_envelope
|
||||||
|
|
||||||
# TODO:
|
# TODO:
|
||||||
# * Eventually replace std/times by chronos/timer. Currently chronos/timer
|
# * Eventually replace std/times by chronos/timer. Currently chronos/timer
|
@ -19,9 +19,9 @@ import chronos, chronicles
|
|||||||
import ./client,
|
import ./client,
|
||||||
./rconn,
|
./rconn,
|
||||||
./utils,
|
./utils,
|
||||||
../../switch,
|
../../../switch,
|
||||||
../../stream/connection,
|
../../../stream/connection,
|
||||||
../../transports/transport
|
../../../transports/transport
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p relay relay-transport"
|
topics = "libp2p relay relay-transport"
|
@ -17,7 +17,7 @@ import options
|
|||||||
import chronos, chronicles
|
import chronos, chronicles
|
||||||
|
|
||||||
import ./messages,
|
import ./messages,
|
||||||
../../stream/connection
|
../../../stream/connection
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p relay relay-utils"
|
topics = "libp2p relay relay-utils"
|
||||||
@ -64,15 +64,17 @@ proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
|
|||||||
await futSrc or futDst
|
await futSrc or futDst
|
||||||
if futSrc.finished():
|
if futSrc.finished():
|
||||||
bufRead = await futSrc
|
bufRead = await futSrc
|
||||||
bytesSendFromSrcToDst.inc(bufRead)
|
if bufRead > 0:
|
||||||
await connDst.write(@bufSrcToDst[0..<bufRead])
|
bytesSendFromSrcToDst.inc(bufRead)
|
||||||
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
|
await connDst.write(@bufSrcToDst[0..<bufRead])
|
||||||
|
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
|
||||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
||||||
if futDst.finished():
|
if futDst.finished():
|
||||||
bufRead = await futDst
|
bufRead = await futDst
|
||||||
bytesSendFromDstToSrc += bufRead
|
if bufRead > 0:
|
||||||
await connSrc.write(bufDstToSrc[0..<bufRead])
|
bytesSendFromDstToSrc += bufRead
|
||||||
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
|
await connSrc.write(bufDstToSrc[0..<bufRead])
|
||||||
|
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
|
||||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
||||||
except CancelledError as exc:
|
except CancelledError as exc:
|
||||||
raise exc
|
raise exc
|
@ -16,6 +16,7 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[sequtils, options, strutils, sugar]
|
import std/[sequtils, options, strutils, sugar]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles
|
import chronos, chronicles
|
||||||
import ../protobuf/minprotobuf,
|
import ../protobuf/minprotobuf,
|
||||||
../peerinfo,
|
../peerinfo,
|
||||||
@ -80,7 +81,7 @@ chronicles.expandIt(IdentifyInfo):
|
|||||||
if iinfo.signedPeerRecord.isSome(): "Some"
|
if iinfo.signedPeerRecord.isSome(): "Some"
|
||||||
else: "None"
|
else: "None"
|
||||||
|
|
||||||
proc encodeMsg(peerInfo: PeerInfo, observedAddr: MultiAddress, sendSpr: bool): ProtoBuffer
|
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
|
||||||
{.raises: [Defect].} =
|
{.raises: [Defect].} =
|
||||||
result = initProtoBuffer()
|
result = initProtoBuffer()
|
||||||
|
|
||||||
@ -91,7 +92,8 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: MultiAddress, sendSpr: bool): P
|
|||||||
result.write(2, ma.data.buffer)
|
result.write(2, ma.data.buffer)
|
||||||
for proto in peerInfo.protocols:
|
for proto in peerInfo.protocols:
|
||||||
result.write(3, proto)
|
result.write(3, proto)
|
||||||
result.write(4, observedAddr.data.buffer)
|
if observedAddr.isSome:
|
||||||
|
result.write(4, observedAddr.get().data.buffer)
|
||||||
let protoVersion = ProtoVersion
|
let protoVersion = ProtoVersion
|
||||||
result.write(5, protoVersion)
|
result.write(5, protoVersion)
|
||||||
let agentVersion = if peerInfo.agentVersion.len <= 0:
|
let agentVersion = if peerInfo.agentVersion.len <= 0:
|
||||||
|
@ -12,9 +12,14 @@ when (NimMajor, NimMinor) < (1, 4):
|
|||||||
else:
|
else:
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import chronos
|
import chronos, stew/results
|
||||||
import ../stream/connection
|
import ../stream/connection
|
||||||
|
|
||||||
|
export results
|
||||||
|
|
||||||
|
const
|
||||||
|
DefaultMaxIncomingStreams* = 10
|
||||||
|
|
||||||
type
|
type
|
||||||
LPProtoHandler* = proc (
|
LPProtoHandler* = proc (
|
||||||
conn: Connection,
|
conn: Connection,
|
||||||
@ -26,11 +31,17 @@ type
|
|||||||
codecs*: seq[string]
|
codecs*: seq[string]
|
||||||
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
|
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
|
||||||
started*: bool
|
started*: bool
|
||||||
|
maxIncomingStreams: Opt[int]
|
||||||
|
|
||||||
method init*(p: LPProtocol) {.base, gcsafe.} = discard
|
method init*(p: LPProtocol) {.base, gcsafe.} = discard
|
||||||
method start*(p: LPProtocol) {.async, base.} = p.started = true
|
method start*(p: LPProtocol) {.async, base.} = p.started = true
|
||||||
method stop*(p: LPProtocol) {.async, base.} = p.started = false
|
method stop*(p: LPProtocol) {.async, base.} = p.started = false
|
||||||
|
|
||||||
|
proc maxIncomingStreams*(p: LPProtocol): int =
|
||||||
|
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
|
||||||
|
|
||||||
|
proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
|
||||||
|
p.maxIncomingStreams = Opt.some(val)
|
||||||
|
|
||||||
func codec*(p: LPProtocol): string =
|
func codec*(p: LPProtocol): string =
|
||||||
assert(p.codecs.len > 0, "Codecs sequence was empty!")
|
assert(p.codecs.len > 0, "Codecs sequence was empty!")
|
||||||
@ -40,3 +51,16 @@ func `codec=`*(p: LPProtocol, codec: string) =
|
|||||||
# always insert as first codec
|
# always insert as first codec
|
||||||
# if we use this abstraction
|
# if we use this abstraction
|
||||||
p.codecs.insert(codec, 0)
|
p.codecs.insert(codec, 0)
|
||||||
|
|
||||||
|
proc new*(
|
||||||
|
T: type LPProtocol,
|
||||||
|
codecs: seq[string],
|
||||||
|
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
|
||||||
|
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
|
||||||
|
T(
|
||||||
|
codecs: codecs,
|
||||||
|
handler: handler,
|
||||||
|
maxIncomingStreams:
|
||||||
|
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
|
||||||
|
else: maxIncomingStreams
|
||||||
|
)
|
||||||
|
@ -360,7 +360,7 @@ method rpcHandler*(g: GossipSub,
|
|||||||
template sub: untyped = rpcMsg.subscriptions[i]
|
template sub: untyped = rpcMsg.subscriptions[i]
|
||||||
g.handleSubscribe(peer, sub.topic, sub.subscribe)
|
g.handleSubscribe(peer, sub.topic, sub.subscribe)
|
||||||
|
|
||||||
# the above call applied limtis to subs number
|
# the above call applied limits to subs number
|
||||||
# in gossipsub we want to apply scoring as well
|
# in gossipsub we want to apply scoring as well
|
||||||
if rpcMsg.subscriptions.len > g.topicsHigh:
|
if rpcMsg.subscriptions.len > g.topicsHigh:
|
||||||
debug "received an rpc message with an oversized amount of subscriptions", peer,
|
debug "received an rpc message with an oversized amount of subscriptions", peer,
|
||||||
@ -435,6 +435,13 @@ method rpcHandler*(g: GossipSub,
|
|||||||
if rpcMsg.control.isSome():
|
if rpcMsg.control.isSome():
|
||||||
g.handleControl(peer, rpcMsg.control.unsafeGet())
|
g.handleControl(peer, rpcMsg.control.unsafeGet())
|
||||||
|
|
||||||
|
# Now, check subscription to update the meshes if required
|
||||||
|
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
|
||||||
|
let topic = rpcMsg.subscriptions[i].topic
|
||||||
|
if topic in g.topics and g.mesh.peers(topic) < g.parameters.dLow:
|
||||||
|
# rebalance but don't update metrics here, we do that only in the heartbeat
|
||||||
|
g.rebalanceMesh(topic, metrics = nil)
|
||||||
|
|
||||||
g.updateMetrics(rpcMsg)
|
g.updateMetrics(rpcMsg)
|
||||||
|
|
||||||
method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||||
|
@ -19,6 +19,9 @@ import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]
|
|||||||
import "../rpc"/[messages]
|
import "../rpc"/[messages]
|
||||||
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
|
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "libp2p gossipsub"
|
||||||
|
|
||||||
declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
|
declareGauge(libp2p_gossipsub_cache_window_size, "the number of messages in the cache")
|
||||||
declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"])
|
declareGauge(libp2p_gossipsub_peers_per_topic_mesh, "gossipsub peers per topic in mesh", labels = ["topic"])
|
||||||
declareGauge(libp2p_gossipsub_peers_per_topic_fanout, "gossipsub peers per topic in fanout", labels = ["topic"])
|
declareGauge(libp2p_gossipsub_peers_per_topic_fanout, "gossipsub peers per topic in fanout", labels = ["topic"])
|
||||||
|
@ -18,6 +18,9 @@ import "."/[types]
|
|||||||
import ".."/[pubsubpeer]
|
import ".."/[pubsubpeer]
|
||||||
import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat]
|
import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat]
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "libp2p gossipsub"
|
||||||
|
|
||||||
declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"])
|
declareGauge(libp2p_gossipsub_peers_scores, "the scores of the peers in gossipsub", labels = ["agent"])
|
||||||
declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"])
|
declareCounter(libp2p_gossipsub_bad_score_disconnection, "the number of peers disconnected by gossipsub", labels = ["agent"])
|
||||||
declareGauge(libp2p_gossipsub_peers_score_firstMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])
|
declareGauge(libp2p_gossipsub_peers_score_firstMessageDeliveries, "Detailed gossipsub scoring metric", labels = ["agent"])
|
||||||
|
@ -16,7 +16,7 @@ import chronos
|
|||||||
import std/[tables, sets]
|
import std/[tables, sets]
|
||||||
import ".."/[floodsub, peertable, mcache, pubsubpeer]
|
import ".."/[floodsub, peertable, mcache, pubsubpeer]
|
||||||
import "../rpc"/[messages]
|
import "../rpc"/[messages]
|
||||||
import "../../.."/[peerid, multiaddress]
|
import "../../.."/[peerid, multiaddress, utility]
|
||||||
|
|
||||||
const
|
const
|
||||||
GossipSubCodec* = "/meshsub/1.1.0"
|
GossipSubCodec* = "/meshsub/1.1.0"
|
||||||
@ -65,7 +65,7 @@ type
|
|||||||
meshFailurePenalty*: float64
|
meshFailurePenalty*: float64
|
||||||
invalidMessageDeliveries*: float64
|
invalidMessageDeliveries*: float64
|
||||||
|
|
||||||
TopicParams* = object
|
TopicParams* {.public.} = object
|
||||||
topicWeight*: float64
|
topicWeight*: float64
|
||||||
|
|
||||||
# p1
|
# p1
|
||||||
@ -102,7 +102,7 @@ type
|
|||||||
appScore*: float64 # application specific score
|
appScore*: float64 # application specific score
|
||||||
behaviourPenalty*: float64 # the eventual penalty score
|
behaviourPenalty*: float64 # the eventual penalty score
|
||||||
|
|
||||||
GossipSubParams* = object
|
GossipSubParams* {.public.} = object
|
||||||
explicit*: bool
|
explicit*: bool
|
||||||
pruneBackoff*: Duration
|
pruneBackoff*: Duration
|
||||||
unsubscribeBackoff*: Duration
|
unsubscribeBackoff*: Duration
|
||||||
|
@ -130,7 +130,7 @@ type
|
|||||||
|
|
||||||
knownTopics*: HashSet[string]
|
knownTopics*: HashSet[string]
|
||||||
|
|
||||||
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
|
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
|
||||||
## handle peer disconnects
|
## handle peer disconnects
|
||||||
##
|
##
|
||||||
|
|
||||||
@ -267,11 +267,11 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
|
|||||||
|
|
||||||
method rpcHandler*(p: PubSub,
|
method rpcHandler*(p: PubSub,
|
||||||
peer: PubSubPeer,
|
peer: PubSubPeer,
|
||||||
rpcMsg: RPCMsg): Future[void] {.base.} =
|
rpcMsg: RPCMsg): Future[void] {.base, async.} =
|
||||||
## Handler that must be overridden by concrete implementation
|
## Handler that must be overridden by concrete implementation
|
||||||
raiseAssert "Unimplemented"
|
raiseAssert "Unimplemented"
|
||||||
|
|
||||||
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base.} = discard
|
method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} = discard
|
||||||
|
|
||||||
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
|
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
|
||||||
# Peer event is raised for the send connection in particular
|
# Peer event is raised for the send connection in particular
|
||||||
@ -292,19 +292,11 @@ proc getOrCreatePeer*(
|
|||||||
proc getConn(): Future[Connection] {.async.} =
|
proc getConn(): Future[Connection] {.async.} =
|
||||||
return await p.switch.dial(peerId, protos)
|
return await p.switch.dial(peerId, protos)
|
||||||
|
|
||||||
proc dropConn(peer: PubSubPeer) =
|
|
||||||
proc dropConnAsync(peer: PubSubPeer) {.async.} =
|
|
||||||
try:
|
|
||||||
await p.switch.disconnect(peer.peerId)
|
|
||||||
except CatchableError as exc: # never cancelled
|
|
||||||
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
|
|
||||||
asyncSpawn dropConnAsync(peer)
|
|
||||||
|
|
||||||
proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
|
proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
|
||||||
p.onPubSubPeerEvent(peer, event)
|
p.onPubSubPeerEvent(peer, event)
|
||||||
|
|
||||||
# create new pubsub peer
|
# create new pubsub peer
|
||||||
let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, onEvent, protos[0], p.maxMessageSize)
|
let pubSubPeer = PubSubPeer.new(peerId, getConn, onEvent, protos[0], p.maxMessageSize)
|
||||||
debug "created new pubsub peer", peerId
|
debug "created new pubsub peer", peerId
|
||||||
|
|
||||||
p.peers[peerId] = pubSubPeer
|
p.peers[peerId] = pubSubPeer
|
||||||
@ -385,7 +377,7 @@ method handleConn*(p: PubSub,
|
|||||||
finally:
|
finally:
|
||||||
await conn.closeWithEOF()
|
await conn.closeWithEOF()
|
||||||
|
|
||||||
method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
|
method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} =
|
||||||
## subscribe to remote peer to receive/send pubsub
|
## subscribe to remote peer to receive/send pubsub
|
||||||
## messages
|
## messages
|
||||||
##
|
##
|
||||||
@ -408,7 +400,7 @@ proc updateTopicMetrics(p: PubSub, topic: string) =
|
|||||||
|
|
||||||
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
|
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
|
||||||
|
|
||||||
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base.} =
|
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, gcsafe.} =
|
||||||
# Called when subscribe is called the first time for a topic or unsubscribe
|
# Called when subscribe is called the first time for a topic or unsubscribe
|
||||||
# removes the last handler
|
# removes the last handler
|
||||||
|
|
||||||
@ -441,7 +433,7 @@ proc unsubscribe*(p: PubSub, topics: openArray[TopicPair]) {.public.} =
|
|||||||
for t in topics:
|
for t in topics:
|
||||||
p.unsubscribe(t.topic, t.handler)
|
p.unsubscribe(t.topic, t.handler)
|
||||||
|
|
||||||
proc unsubscribeAll*(p: PubSub, topic: string) {.public.} =
|
proc unsubscribeAll*(p: PubSub, topic: string) {.public, gcsafe.} =
|
||||||
## unsubscribe every `handler` from `topic`
|
## unsubscribe every `handler` from `topic`
|
||||||
if topic notin p.topics:
|
if topic notin p.topics:
|
||||||
debug "unsubscribeAll called for an unknown topic", topic
|
debug "unsubscribeAll called for an unknown topic", topic
|
||||||
@ -503,7 +495,7 @@ method initPubSub*(p: PubSub)
|
|||||||
|
|
||||||
method addValidator*(p: PubSub,
|
method addValidator*(p: PubSub,
|
||||||
topic: varargs[string],
|
topic: varargs[string],
|
||||||
hook: ValidatorHandler) {.base, public.} =
|
hook: ValidatorHandler) {.base, public, gcsafe.} =
|
||||||
## Add a validator to a `topic`. Each new message received in this
|
## Add a validator to a `topic`. Each new message received in this
|
||||||
## will be sent to `hook`. `hook` can return either `Accept`,
|
## will be sent to `hook`. `hook` can return either `Accept`,
|
||||||
## `Ignore` or `Reject` (which can descore the peer)
|
## `Ignore` or `Reject` (which can descore the peer)
|
||||||
|
@ -12,7 +12,8 @@ when (NimMajor, NimMinor) < (1, 4):
|
|||||||
else:
|
else:
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[sequtils, strutils, tables, hashes]
|
import std/[sequtils, strutils, tables, hashes, options]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles, nimcrypto/sha2, metrics
|
import chronos, chronicles, nimcrypto/sha2, metrics
|
||||||
import rpc/[messages, message, protobuf],
|
import rpc/[messages, message, protobuf],
|
||||||
../../peerid,
|
../../peerid,
|
||||||
@ -51,7 +52,6 @@ type
|
|||||||
|
|
||||||
PubSubPeer* = ref object of RootObj
|
PubSubPeer* = ref object of RootObj
|
||||||
getConn*: GetConn # callback to establish a new send connection
|
getConn*: GetConn # callback to establish a new send connection
|
||||||
dropConn*: DropConn # Function pointer to use to drop connections
|
|
||||||
onEvent*: OnEvent # Connectivity updates for peer
|
onEvent*: OnEvent # Connectivity updates for peer
|
||||||
codec*: string # the protocol that this peer joined from
|
codec*: string # the protocol that this peer joined from
|
||||||
sendConn*: Connection # cached send connection
|
sendConn*: Connection # cached send connection
|
||||||
@ -175,7 +175,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
|
|||||||
|
|
||||||
trace "Get new send connection", p, newConn
|
trace "Get new send connection", p, newConn
|
||||||
p.sendConn = newConn
|
p.sendConn = newConn
|
||||||
p.address = some(p.sendConn.observedAddr)
|
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)
|
||||||
|
|
||||||
if p.onEvent != nil:
|
if p.onEvent != nil:
|
||||||
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))
|
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))
|
||||||
@ -206,9 +206,6 @@ proc connectImpl(p: PubSubPeer) {.async.} =
|
|||||||
await connectOnce(p)
|
await connectOnce(p)
|
||||||
except CatchableError as exc: # never cancelled
|
except CatchableError as exc: # never cancelled
|
||||||
debug "Could not establish send connection", msg = exc.msg
|
debug "Could not establish send connection", msg = exc.msg
|
||||||
finally:
|
|
||||||
# drop the connection, else we end up with ghost peers
|
|
||||||
if p.dropConn != nil: p.dropConn(p)
|
|
||||||
|
|
||||||
proc connect*(p: PubSubPeer) =
|
proc connect*(p: PubSubPeer) =
|
||||||
asyncSpawn connectImpl(p)
|
asyncSpawn connectImpl(p)
|
||||||
@ -286,14 +283,12 @@ proc new*(
|
|||||||
T: typedesc[PubSubPeer],
|
T: typedesc[PubSubPeer],
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
getConn: GetConn,
|
getConn: GetConn,
|
||||||
dropConn: DropConn,
|
|
||||||
onEvent: OnEvent,
|
onEvent: OnEvent,
|
||||||
codec: string,
|
codec: string,
|
||||||
maxMessageSize: int): T =
|
maxMessageSize: int): T =
|
||||||
|
|
||||||
T(
|
T(
|
||||||
getConn: getConn,
|
getConn: getConn,
|
||||||
dropConn: dropConn,
|
|
||||||
onEvent: onEvent,
|
onEvent: onEvent,
|
||||||
codec: codec,
|
codec: codec,
|
||||||
peerId: peerId,
|
peerId: peerId,
|
||||||
|
@ -22,7 +22,7 @@ import messages,
|
|||||||
|
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "pubsubprotobuf"
|
topics = "libp2p pubsubprotobuf"
|
||||||
|
|
||||||
when defined(libp2p_protobuf_metrics):
|
when defined(libp2p_protobuf_metrics):
|
||||||
import metrics
|
import metrics
|
||||||
@ -304,14 +304,15 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
|
|||||||
if ? pb.getRepeatedField(2, msgpbs):
|
if ? pb.getRepeatedField(2, msgpbs):
|
||||||
trace "decodeMessages: read messages", count = len(msgpbs)
|
trace "decodeMessages: read messages", count = len(msgpbs)
|
||||||
for item in msgpbs:
|
for item in msgpbs:
|
||||||
msgs.add(? decodeMessage(initProtoBuffer(item)))
|
# size is constrained at the network level
|
||||||
|
msgs.add(? decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
|
||||||
else:
|
else:
|
||||||
trace "decodeMessages: no messages found"
|
trace "decodeMessages: no messages found"
|
||||||
ok(msgs)
|
ok(msgs)
|
||||||
|
|
||||||
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
|
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
|
||||||
trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
|
trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
|
||||||
var pb = initProtoBuffer()
|
var pb = initProtoBuffer(maxSize = uint.high)
|
||||||
for item in msg.subscriptions:
|
for item in msg.subscriptions:
|
||||||
pb.write(1, item)
|
pb.write(1, item)
|
||||||
for item in msg.messages:
|
for item in msg.messages:
|
||||||
@ -324,7 +325,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
|
|||||||
|
|
||||||
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
|
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
|
||||||
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
|
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
|
||||||
var pb = initProtoBuffer(msg)
|
var pb = initProtoBuffer(msg, maxSize = uint.high)
|
||||||
var rpcMsg = ok(RPCMsg())
|
var rpcMsg = ok(RPCMsg())
|
||||||
assign(rpcMsg.get().messages, ? pb.decodeMessages())
|
assign(rpcMsg.get().messages, ? pb.decodeMessages())
|
||||||
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
|
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
|
||||||
|
677
libp2p/protocols/rendezvous.nim
Normal file
677
libp2p/protocols/rendezvous.nim
Normal file
@ -0,0 +1,677 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import tables, sequtils, sugar, sets, options
|
||||||
|
import chronos,
|
||||||
|
chronicles,
|
||||||
|
bearssl/rand,
|
||||||
|
stew/[byteutils, objects]
|
||||||
|
import ./protocol,
|
||||||
|
../switch,
|
||||||
|
../routing_record,
|
||||||
|
../utils/heartbeat,
|
||||||
|
../stream/connection,
|
||||||
|
../utils/offsettedseq,
|
||||||
|
../utils/semaphore
|
||||||
|
|
||||||
|
export chronicles
|
||||||
|
|
||||||
|
logScope:
|
||||||
|
topics = "libp2p discovery rendezvous"
|
||||||
|
|
||||||
|
const
|
||||||
|
RendezVousCodec* = "/rendezvous/1.0.0"
|
||||||
|
MinimumDuration* = 2.hours
|
||||||
|
MaximumDuration = 72.hours
|
||||||
|
MinimumTTL = MinimumDuration.seconds.uint64
|
||||||
|
MaximumTTL = MaximumDuration.seconds.uint64
|
||||||
|
RegistrationLimitPerPeer = 1000
|
||||||
|
DiscoverLimit = 1000'u64
|
||||||
|
SemaphoreDefaultSize = 5
|
||||||
|
|
||||||
|
type
|
||||||
|
MessageType {.pure.} = enum
|
||||||
|
Register = 0
|
||||||
|
RegisterResponse = 1
|
||||||
|
Unregister = 2
|
||||||
|
Discover = 3
|
||||||
|
DiscoverResponse = 4
|
||||||
|
|
||||||
|
ResponseStatus = enum
|
||||||
|
Ok = 0
|
||||||
|
InvalidNamespace = 100
|
||||||
|
InvalidSignedPeerRecord = 101
|
||||||
|
InvalidTTL = 102
|
||||||
|
InvalidCookie = 103
|
||||||
|
NotAuthorized = 200
|
||||||
|
InternalError = 300
|
||||||
|
Unavailable = 400
|
||||||
|
|
||||||
|
Cookie = object
|
||||||
|
offset : uint64
|
||||||
|
ns : string
|
||||||
|
|
||||||
|
Register = object
|
||||||
|
ns : string
|
||||||
|
signedPeerRecord: seq[byte]
|
||||||
|
ttl: Option[uint64] # in seconds
|
||||||
|
|
||||||
|
RegisterResponse = object
|
||||||
|
status: ResponseStatus
|
||||||
|
text: Option[string]
|
||||||
|
ttl: Option[uint64] # in seconds
|
||||||
|
|
||||||
|
Unregister = object
|
||||||
|
ns: string
|
||||||
|
|
||||||
|
Discover = object
|
||||||
|
ns: string
|
||||||
|
limit: Option[uint64]
|
||||||
|
cookie: Option[seq[byte]]
|
||||||
|
|
||||||
|
DiscoverResponse = object
|
||||||
|
registrations: seq[Register]
|
||||||
|
cookie: Option[seq[byte]]
|
||||||
|
status: ResponseStatus
|
||||||
|
text: Option[string]
|
||||||
|
|
||||||
|
Message = object
|
||||||
|
msgType: MessageType
|
||||||
|
register: Option[Register]
|
||||||
|
registerResponse: Option[RegisterResponse]
|
||||||
|
unregister: Option[Unregister]
|
||||||
|
discover: Option[Discover]
|
||||||
|
discoverResponse: Option[DiscoverResponse]
|
||||||
|
|
||||||
|
proc encode(c: Cookie): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, c.offset)
|
||||||
|
result.write(2, c.ns)
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(r: Register): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, r.ns)
|
||||||
|
result.write(2, r.signedPeerRecord)
|
||||||
|
if r.ttl.isSome():
|
||||||
|
result.write(3, r.ttl.get())
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(rr: RegisterResponse): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, rr.status.uint)
|
||||||
|
if rr.text.isSome():
|
||||||
|
result.write(2, rr.text.get())
|
||||||
|
if rr.ttl.isSome():
|
||||||
|
result.write(3, rr.ttl.get())
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(u: Unregister): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, u.ns)
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(d: Discover): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, d.ns)
|
||||||
|
if d.limit.isSome():
|
||||||
|
result.write(2, d.limit.get())
|
||||||
|
if d.cookie.isSome():
|
||||||
|
result.write(3, d.cookie.get())
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(d: DiscoverResponse): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
for reg in d.registrations:
|
||||||
|
result.write(1, reg.encode())
|
||||||
|
if d.cookie.isSome():
|
||||||
|
result.write(2, d.cookie.get())
|
||||||
|
result.write(3, d.status.uint)
|
||||||
|
if d.text.isSome():
|
||||||
|
result.write(4, d.text.get())
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc encode(msg: Message): ProtoBuffer =
|
||||||
|
result = initProtoBuffer()
|
||||||
|
result.write(1, msg.msgType.uint)
|
||||||
|
if msg.register.isSome():
|
||||||
|
result.write(2, msg.register.get().encode())
|
||||||
|
if msg.registerResponse.isSome():
|
||||||
|
result.write(3, msg.registerResponse.get().encode())
|
||||||
|
if msg.unregister.isSome():
|
||||||
|
result.write(4, msg.unregister.get().encode())
|
||||||
|
if msg.discover.isSome():
|
||||||
|
result.write(5, msg.discover.get().encode())
|
||||||
|
if msg.discoverResponse.isSome():
|
||||||
|
result.write(6, msg.discoverResponse.get().encode())
|
||||||
|
result.finish()
|
||||||
|
|
||||||
|
proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
|
||||||
|
var c: Cookie
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, c.offset)
|
||||||
|
r2 = pb.getRequiredField(2, c.ns)
|
||||||
|
if r1.isErr() or r2.isErr(): return none(Cookie)
|
||||||
|
some(c)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
|
||||||
|
var
|
||||||
|
r: Register
|
||||||
|
ttl: uint64
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, r.ns)
|
||||||
|
r2 = pb.getRequiredField(2, r.signedPeerRecord)
|
||||||
|
r3 = pb.getField(3, ttl)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
|
||||||
|
if r3.get(): r.ttl = some(ttl)
|
||||||
|
some(r)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
|
||||||
|
var
|
||||||
|
rr: RegisterResponse
|
||||||
|
statusOrd: uint
|
||||||
|
text: string
|
||||||
|
ttl: uint64
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, statusOrd)
|
||||||
|
r2 = pb.getField(2, text)
|
||||||
|
r3 = pb.getField(3, ttl)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr() or
|
||||||
|
not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
|
||||||
|
if r2.get(): rr.text = some(text)
|
||||||
|
if r3.get(): rr.ttl = some(ttl)
|
||||||
|
some(rr)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
|
||||||
|
var u: Unregister
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, u.ns)
|
||||||
|
if r1.isErr(): return none(Unregister)
|
||||||
|
some(u)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
|
||||||
|
var
|
||||||
|
d: Discover
|
||||||
|
limit: uint64
|
||||||
|
cookie: seq[byte]
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, d.ns)
|
||||||
|
r2 = pb.getField(2, limit)
|
||||||
|
r3 = pb.getField(3, cookie)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
|
||||||
|
if r2.get(): d.limit = some(limit)
|
||||||
|
if r3.get(): d.cookie = some(cookie)
|
||||||
|
some(d)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
|
||||||
|
var
|
||||||
|
dr: DiscoverResponse
|
||||||
|
registrations: seq[seq[byte]]
|
||||||
|
cookie: seq[byte]
|
||||||
|
statusOrd: uint
|
||||||
|
text: string
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRepeatedField(1, registrations)
|
||||||
|
r2 = pb.getField(2, cookie)
|
||||||
|
r3 = pb.getRequiredField(3, statusOrd)
|
||||||
|
r4 = pb.getField(4, text)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
|
||||||
|
not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
|
||||||
|
for reg in registrations:
|
||||||
|
var r: Register
|
||||||
|
let regOpt = Register.decode(reg)
|
||||||
|
if regOpt.isNone(): return none(DiscoverResponse)
|
||||||
|
dr.registrations.add(regOpt.get())
|
||||||
|
if r2.get(): dr.cookie = some(cookie)
|
||||||
|
if r4.get(): dr.text = some(text)
|
||||||
|
some(dr)
|
||||||
|
|
||||||
|
proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
|
||||||
|
var
|
||||||
|
msg: Message
|
||||||
|
statusOrd: uint
|
||||||
|
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
|
||||||
|
let
|
||||||
|
pb = initProtoBuffer(buf)
|
||||||
|
r1 = pb.getRequiredField(1, statusOrd)
|
||||||
|
r2 = pb.getField(2, pbr)
|
||||||
|
r3 = pb.getField(3, pbrr)
|
||||||
|
r4 = pb.getField(4, pbu)
|
||||||
|
r5 = pb.getField(5, pbd)
|
||||||
|
r6 = pb.getField(6, pbdr)
|
||||||
|
if r1.isErr() or r2.isErr() or r3.isErr() or
|
||||||
|
r4.isErr() or r5.isErr() or r6.isErr() or
|
||||||
|
not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
|
||||||
|
if r2.get():
|
||||||
|
msg.register = Register.decode(pbr.buffer)
|
||||||
|
if msg.register.isNone(): return none(Message)
|
||||||
|
if r3.get():
|
||||||
|
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
|
||||||
|
if msg.registerResponse.isNone(): return none(Message)
|
||||||
|
if r4.get():
|
||||||
|
msg.unregister = Unregister.decode(pbu.buffer)
|
||||||
|
if msg.unregister.isNone(): return none(Message)
|
||||||
|
if r5.get():
|
||||||
|
msg.discover = Discover.decode(pbd.buffer)
|
||||||
|
if msg.discover.isNone(): return none(Message)
|
||||||
|
if r6.get():
|
||||||
|
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
|
||||||
|
if msg.discoverResponse.isNone(): return none(Message)
|
||||||
|
some(msg)
|
||||||
|
|
||||||
|
|
||||||
|
type
|
||||||
|
RendezVousError* = object of LPError
|
||||||
|
RegisteredData = object
|
||||||
|
expiration: Moment
|
||||||
|
peerId: PeerId
|
||||||
|
data: Register
|
||||||
|
|
||||||
|
RendezVous* = ref object of LPProtocol
|
||||||
|
# Registered needs to be an offsetted sequence
|
||||||
|
# because we need stable index for the cookies.
|
||||||
|
registered: OffsettedSeq[RegisteredData]
|
||||||
|
# Namespaces is a table whose key is a salted namespace and
|
||||||
|
# the value is the index sequence corresponding to this
|
||||||
|
# namespace in the offsettedqueue.
|
||||||
|
namespaces: Table[string, seq[int]]
|
||||||
|
rng: ref HmacDrbgContext
|
||||||
|
salt: string
|
||||||
|
defaultDT: Moment
|
||||||
|
registerDeletionLoop: Future[void]
|
||||||
|
#registerEvent: AsyncEvent # TODO: to raise during the heartbeat
|
||||||
|
# + make the heartbeat sleep duration "smarter"
|
||||||
|
sema: AsyncSemaphore
|
||||||
|
peers: seq[PeerId]
|
||||||
|
cookiesSaved: Table[PeerId, Table[string, seq[byte]]]
|
||||||
|
switch: Switch
|
||||||
|
|
||||||
|
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
|
||||||
|
if spr.len == 0: return err("Empty peer record")
|
||||||
|
let signedEnv = ? SignedPeerRecord.decode(spr).mapErr(x => $x)
|
||||||
|
if signedEnv.data.peerId != peerId:
|
||||||
|
return err("Bad Peer ID")
|
||||||
|
return ok()
|
||||||
|
|
||||||
|
proc sendRegisterResponse(conn: Connection,
|
||||||
|
ttl: uint64) {.async.} =
|
||||||
|
let msg = encode(Message(
|
||||||
|
msgType: MessageType.RegisterResponse,
|
||||||
|
registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
|
||||||
|
await conn.writeLp(msg.buffer)
|
||||||
|
|
||||||
|
proc sendRegisterResponseError(conn: Connection,
|
||||||
|
status: ResponseStatus,
|
||||||
|
text: string = "") {.async.} =
|
||||||
|
let msg = encode(Message(
|
||||||
|
msgType: MessageType.RegisterResponse,
|
||||||
|
registerResponse: some(RegisterResponse(status: status, text: some(text)))))
|
||||||
|
await conn.writeLp(msg.buffer)
|
||||||
|
|
||||||
|
proc sendDiscoverResponse(conn: Connection,
|
||||||
|
s: seq[Register],
|
||||||
|
cookie: Cookie) {.async.} =
|
||||||
|
let msg = encode(Message(
|
||||||
|
msgType: MessageType.DiscoverResponse,
|
||||||
|
discoverResponse: some(DiscoverResponse(
|
||||||
|
status: Ok,
|
||||||
|
registrations: s,
|
||||||
|
cookie: some(cookie.encode().buffer)
|
||||||
|
))
|
||||||
|
))
|
||||||
|
await conn.writeLp(msg.buffer)
|
||||||
|
|
||||||
|
proc sendDiscoverResponseError(conn: Connection,
|
||||||
|
status: ResponseStatus,
|
||||||
|
text: string = "") {.async.} =
|
||||||
|
let msg = encode(Message(
|
||||||
|
msgType: MessageType.DiscoverResponse,
|
||||||
|
discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
|
||||||
|
await conn.writeLp(msg.buffer)
|
||||||
|
|
||||||
|
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
|
||||||
|
let n = Moment.now()
|
||||||
|
for data in rdv.registered:
|
||||||
|
if data.peerId == peerId and data.expiration > n:
|
||||||
|
result.inc()
|
||||||
|
|
||||||
|
proc save(rdv: RendezVous,
|
||||||
|
ns: string,
|
||||||
|
peerId: PeerId,
|
||||||
|
r: Register,
|
||||||
|
update: bool = true) =
|
||||||
|
let nsSalted = ns & rdv.salt
|
||||||
|
discard rdv.namespaces.hasKeyOrPut(nsSalted, newSeq[int]())
|
||||||
|
try:
|
||||||
|
for index in rdv.namespaces[nsSalted]:
|
||||||
|
if rdv.registered[index].peerId == peerId:
|
||||||
|
if update == false: return
|
||||||
|
rdv.registered[index].expiration = rdv.defaultDT
|
||||||
|
rdv.registered.add(
|
||||||
|
RegisteredData(
|
||||||
|
peerId: peerId,
|
||||||
|
expiration: Moment.now() + r.ttl.get(MinimumTTL).int64.seconds,
|
||||||
|
data: r
|
||||||
|
)
|
||||||
|
)
|
||||||
|
rdv.namespaces[nsSalted].add(rdv.registered.high)
|
||||||
|
# rdv.registerEvent.fire()
|
||||||
|
except KeyError:
|
||||||
|
doAssert false, "Should have key"
|
||||||
|
|
||||||
|
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||||
|
trace "Received Register", peerId = conn.peerId, ns = r.ns
|
||||||
|
if r.ns.len notin 1..255:
|
||||||
|
return conn.sendRegisterResponseError(InvalidNamespace)
|
||||||
|
let ttl = r.ttl.get(MinimumTTL)
|
||||||
|
if ttl notin MinimumTTL..MaximumTTL:
|
||||||
|
return conn.sendRegisterResponseError(InvalidTTL)
|
||||||
|
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
|
||||||
|
if pr.isErr():
|
||||||
|
return conn.sendRegisterResponseError(InvalidSignedPeerRecord, pr.error())
|
||||||
|
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
|
||||||
|
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
|
||||||
|
rdv.save(r.ns, conn.peerId, r)
|
||||||
|
conn.sendRegisterResponse(ttl)
|
||||||
|
|
||||||
|
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
|
||||||
|
trace "Received Unregister", peerId = conn.peerId, ns = u.ns
|
||||||
|
let nsSalted = u.ns & rdv.salt
|
||||||
|
try:
|
||||||
|
for index in rdv.namespaces[nsSalted]:
|
||||||
|
if rdv.registered[index].peerId == conn.peerId:
|
||||||
|
rdv.registered[index].expiration = rdv.defaultDT
|
||||||
|
except KeyError:
|
||||||
|
return
|
||||||
|
|
||||||
|
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
|
||||||
|
trace "Received Discover", peerId = conn.peerId, ns = d.ns
|
||||||
|
if d.ns.len notin 0..255:
|
||||||
|
await conn.sendDiscoverResponseError(InvalidNamespace)
|
||||||
|
return
|
||||||
|
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
|
||||||
|
var
|
||||||
|
cookie =
|
||||||
|
if d.cookie.isSome():
|
||||||
|
try:
|
||||||
|
Cookie.decode(d.cookie.get()).get()
|
||||||
|
except CatchableError:
|
||||||
|
await conn.sendDiscoverResponseError(InvalidCookie)
|
||||||
|
return
|
||||||
|
else: Cookie(offset: rdv.registered.low().uint64 - 1)
|
||||||
|
if cookie.ns != d.ns or
|
||||||
|
cookie.offset notin rdv.registered.low().uint64..rdv.registered.high().uint64:
|
||||||
|
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
|
||||||
|
let
|
||||||
|
nsSalted = d.ns & rdv.salt
|
||||||
|
namespaces =
|
||||||
|
if d.ns != "":
|
||||||
|
try:
|
||||||
|
rdv.namespaces[nsSalted]
|
||||||
|
except KeyError:
|
||||||
|
await conn.sendDiscoverResponseError(InvalidNamespace)
|
||||||
|
return
|
||||||
|
else: toSeq(cookie.offset.int..rdv.registered.high())
|
||||||
|
if namespaces.len() == 0:
|
||||||
|
await conn.sendDiscoverResponse(@[], Cookie())
|
||||||
|
return
|
||||||
|
var offset = namespaces[^1]
|
||||||
|
let n = Moment.now()
|
||||||
|
var s = collect(newSeq()):
|
||||||
|
for index in namespaces:
|
||||||
|
var reg = rdv.registered[index]
|
||||||
|
if limit == 0:
|
||||||
|
offset = index
|
||||||
|
break
|
||||||
|
if reg.expiration < n or index.uint64 <= cookie.offset: continue
|
||||||
|
limit.dec()
|
||||||
|
reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
|
||||||
|
reg.data
|
||||||
|
rdv.rng.shuffle(s)
|
||||||
|
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
|
||||||
|
|
||||||
|
proc advertisePeer(rdv: RendezVous,
|
||||||
|
peer: PeerId,
|
||||||
|
msg: seq[byte]) {.async.} =
|
||||||
|
proc advertiseWrap() {.async.} =
|
||||||
|
try:
|
||||||
|
let conn = await rdv.switch.dial(peer, RendezVousCodec)
|
||||||
|
defer: await conn.close()
|
||||||
|
await conn.writeLp(msg)
|
||||||
|
let
|
||||||
|
buf = await conn.readLp(4096)
|
||||||
|
msgRecv = Message.decode(buf).get()
|
||||||
|
if msgRecv.msgType != MessageType.RegisterResponse:
|
||||||
|
trace "Unexpected register response", peer, msgType = msgRecv.msgType
|
||||||
|
elif msgRecv.registerResponse.isNone() or
|
||||||
|
msgRecv.registerResponse.get().status != ResponseStatus.Ok:
|
||||||
|
trace "Refuse to register", peer, response = msgRecv.registerResponse
|
||||||
|
except CatchableError as exc:
|
||||||
|
trace "exception in the advertise", error = exc.msg
|
||||||
|
finally:
|
||||||
|
rdv.sema.release()
|
||||||
|
await rdv.sema.acquire()
|
||||||
|
discard await advertiseWrap().withTimeout(5.seconds)
|
||||||
|
|
||||||
|
proc advertise*(rdv: RendezVous,
|
||||||
|
ns: string,
|
||||||
|
ttl: Duration = MinimumDuration) {.async.} =
|
||||||
|
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
|
||||||
|
if sprBuff.isErr():
|
||||||
|
raise newException(RendezVousError, "Wrong Signed Peer Record")
|
||||||
|
if ns.len notin 1..255:
|
||||||
|
raise newException(RendezVousError, "Invalid namespace")
|
||||||
|
if ttl notin MinimumDuration..MaximumDuration:
|
||||||
|
raise newException(RendezVousError, "Invalid time to live")
|
||||||
|
let
|
||||||
|
r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
|
||||||
|
msg = encode(Message(msgType: MessageType.Register, register: some(r)))
|
||||||
|
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
|
||||||
|
let fut = collect(newSeq()):
|
||||||
|
for peer in rdv.peers:
|
||||||
|
trace "Send Advertise", peerId = peer, ns
|
||||||
|
rdv.advertisePeer(peer, msg.buffer)
|
||||||
|
await allFutures(fut)
|
||||||
|
|
||||||
|
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
|
||||||
|
let
|
||||||
|
nsSalted = ns & rdv.salt
|
||||||
|
n = Moment.now()
|
||||||
|
try:
|
||||||
|
collect(newSeq()):
|
||||||
|
for index in rdv.namespaces[nsSalted]:
|
||||||
|
if rdv.registered[index].expiration > n:
|
||||||
|
SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
|
||||||
|
except KeyError as exc:
|
||||||
|
@[]
|
||||||
|
|
||||||
|
proc request*(rdv: RendezVous,
|
||||||
|
ns: string,
|
||||||
|
l: int = DiscoverLimit.int): Future[seq[PeerRecord]] {.async.} =
|
||||||
|
let nsSalted = ns & rdv.salt
|
||||||
|
var
|
||||||
|
s: Table[PeerId, (PeerRecord, Register)]
|
||||||
|
limit: uint64
|
||||||
|
d = Discover(ns: ns)
|
||||||
|
|
||||||
|
if l <= 0 or l > DiscoverLimit.int:
|
||||||
|
raise newException(RendezVousError, "Invalid limit")
|
||||||
|
if ns.len notin 0..255:
|
||||||
|
raise newException(RendezVousError, "Invalid namespace")
|
||||||
|
limit = l.uint64
|
||||||
|
proc requestPeer(peer: PeerId) {.async.} =
|
||||||
|
let conn = await rdv.switch.dial(peer, RendezVousCodec)
|
||||||
|
defer: await conn.close()
|
||||||
|
d.limit = some(limit)
|
||||||
|
d.cookie =
|
||||||
|
try:
|
||||||
|
some(rdv.cookiesSaved[peer][ns])
|
||||||
|
except KeyError as exc:
|
||||||
|
none(seq[byte])
|
||||||
|
await conn.writeLp(encode(Message(
|
||||||
|
msgType: MessageType.Discover,
|
||||||
|
discover: some(d))).buffer)
|
||||||
|
let
|
||||||
|
buf = await conn.readLp(65536)
|
||||||
|
msgRcv = Message.decode(buf).get()
|
||||||
|
if msgRcv.msgType != MessageType.DiscoverResponse or
|
||||||
|
msgRcv.discoverResponse.isNone():
|
||||||
|
debug "Unexpected discover response", msgType = msgRcv.msgType
|
||||||
|
return
|
||||||
|
let resp = msgRcv.discoverResponse.get()
|
||||||
|
if resp.status != ResponseStatus.Ok:
|
||||||
|
trace "Cannot discover", ns, status = resp.status, text = resp.text
|
||||||
|
return
|
||||||
|
if resp.cookie.isSome() and resp.cookie.get().len < 1000:
|
||||||
|
if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
|
||||||
|
rdv.cookiesSaved[peer][ns] = resp.cookie.get()
|
||||||
|
for r in resp.registrations:
|
||||||
|
if limit == 0: return
|
||||||
|
if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
|
||||||
|
let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
|
||||||
|
if sprRes.isErr(): continue
|
||||||
|
let pr = sprRes.get().data
|
||||||
|
if s.hasKey(pr.peerId):
|
||||||
|
let (prSaved, rSaved) = s[pr.peerId]
|
||||||
|
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
|
||||||
|
prSaved.seqNo < pr.seqNo:
|
||||||
|
s[pr.peerId] = (pr, r)
|
||||||
|
else:
|
||||||
|
s[pr.peerId] = (pr, r)
|
||||||
|
limit.dec()
|
||||||
|
for (_, r) in s.values():
|
||||||
|
rdv.save(ns, peer, r, false)
|
||||||
|
|
||||||
|
# copy to avoid resizes during the loop
|
||||||
|
let peers = rdv.peers
|
||||||
|
for peer in peers:
|
||||||
|
if limit == 0: break
|
||||||
|
if RendezVousCodec notin rdv.switch.peerStore[ProtoBook][peer]: continue
|
||||||
|
try:
|
||||||
|
trace "Send Request", peerId = peer, ns
|
||||||
|
await peer.requestPeer()
|
||||||
|
except CancelledError as exc:
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
trace "exception catch in request", error = exc.msg
|
||||||
|
return toSeq(s.values()).mapIt(it[0])
|
||||||
|
|
||||||
|
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
|
||||||
|
let nsSalted = ns & rdv.salt
|
||||||
|
try:
|
||||||
|
for index in rdv.namespaces[nsSalted]:
|
||||||
|
if rdv.registered[index].peerId == rdv.switch.peerInfo.peerId:
|
||||||
|
rdv.registered[index].expiration = rdv.defaultDT
|
||||||
|
except KeyError:
|
||||||
|
return
|
||||||
|
|
||||||
|
proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
|
||||||
|
# TODO: find a way to improve this, maybe something similar to the advertise
|
||||||
|
if ns.len notin 1..255:
|
||||||
|
raise newException(RendezVousError, "Invalid namespace")
|
||||||
|
rdv.unsubscribeLocally(ns)
|
||||||
|
let msg = encode(Message(
|
||||||
|
msgType: MessageType.Unregister,
|
||||||
|
unregister: some(Unregister(ns: ns))))
|
||||||
|
|
||||||
|
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
|
||||||
|
try:
|
||||||
|
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
|
||||||
|
defer: await conn.close()
|
||||||
|
await conn.writeLp(msg.buffer)
|
||||||
|
except CatchableError as exc:
|
||||||
|
trace "exception while unsubscribing", error = exc.msg
|
||||||
|
|
||||||
|
for peer in rdv.peers:
|
||||||
|
discard await rdv.unsubscribePeer(peer).withTimeout(5.seconds)
|
||||||
|
|
||||||
|
proc setup*(rdv: RendezVous, switch: Switch) =
|
||||||
|
rdv.switch = switch
|
||||||
|
proc handlePeer(peerId: PeerId, event: PeerEvent) {.async.} =
|
||||||
|
if event.kind == PeerEventKind.Joined:
|
||||||
|
rdv.peers.add(peerId)
|
||||||
|
elif event.kind == PeerEventKind.Left:
|
||||||
|
rdv.peers.keepItIf(it != peerId)
|
||||||
|
rdv.switch.addPeerEventHandler(handlePeer, Joined)
|
||||||
|
rdv.switch.addPeerEventHandler(handlePeer, Left)
|
||||||
|
|
||||||
|
proc new*(T: typedesc[RendezVous],
|
||||||
|
rng: ref HmacDrbgContext = newRng()): T =
|
||||||
|
let rdv = T(
|
||||||
|
rng: rng,
|
||||||
|
salt: string.fromBytes(generateBytes(rng[], 8)),
|
||||||
|
registered: initOffsettedSeq[RegisteredData](1),
|
||||||
|
defaultDT: Moment.now() - 1.days,
|
||||||
|
#registerEvent: newAsyncEvent(),
|
||||||
|
sema: newAsyncSemaphore(SemaphoreDefaultSize)
|
||||||
|
)
|
||||||
|
logScope: topics = "libp2p discovery rendezvous"
|
||||||
|
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||||
|
try:
|
||||||
|
let
|
||||||
|
buf = await conn.readLp(4096)
|
||||||
|
msg = Message.decode(buf).get()
|
||||||
|
case msg.msgType:
|
||||||
|
of MessageType.Register: await rdv.register(conn, msg.register.get())
|
||||||
|
of MessageType.RegisterResponse:
|
||||||
|
trace "Got an unexpected Register Response", response = msg.registerResponse
|
||||||
|
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
|
||||||
|
of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
|
||||||
|
of MessageType.DiscoverResponse:
|
||||||
|
trace "Got an unexpected Discover Response", response = msg.discoverResponse
|
||||||
|
except CancelledError as exc:
|
||||||
|
raise exc
|
||||||
|
except CatchableError as exc:
|
||||||
|
trace "exception in rendezvous handler", error = exc.msg
|
||||||
|
finally:
|
||||||
|
await conn.close()
|
||||||
|
|
||||||
|
rdv.handler = handleStream
|
||||||
|
rdv.codec = RendezVousCodec
|
||||||
|
return rdv
|
||||||
|
|
||||||
|
proc new*(T: typedesc[RendezVous],
|
||||||
|
switch: Switch,
|
||||||
|
rng: ref HmacDrbgContext = newRng()): T =
|
||||||
|
let rdv = T.new(rng)
|
||||||
|
rdv.setup(switch)
|
||||||
|
return rdv
|
||||||
|
|
||||||
|
proc deletesRegister(rdv: RendezVous) {.async.} =
|
||||||
|
heartbeat "Register timeout", 1.minutes:
|
||||||
|
let n = Moment.now()
|
||||||
|
rdv.registered.flushIfIt(it.expiration < n)
|
||||||
|
for data in rdv.namespaces.mvalues():
|
||||||
|
data.keepItIf(it >= rdv.registered.offset)
|
||||||
|
|
||||||
|
method start*(rdv: RendezVous) {.async.} =
|
||||||
|
if not rdv.registerDeletionLoop.isNil:
|
||||||
|
warn "Starting rendezvous twice"
|
||||||
|
return
|
||||||
|
rdv.registerDeletionLoop = rdv.deletesRegister()
|
||||||
|
rdv.started = true
|
||||||
|
|
||||||
|
method stop*(rdv: RendezVous) {.async.} =
|
||||||
|
if rdv.registerDeletionLoop.isNil:
|
||||||
|
warn "Stopping rendezvous without starting it"
|
||||||
|
return
|
||||||
|
rdv.started = false
|
||||||
|
rdv.registerDeletionLoop.cancel()
|
||||||
|
rdv.registerDeletionLoop = nil
|
@ -38,7 +38,7 @@ const
|
|||||||
# https://godoc.org/github.com/libp2p/go-libp2p-noise#pkg-constants
|
# https://godoc.org/github.com/libp2p/go-libp2p-noise#pkg-constants
|
||||||
NoiseCodec* = "/noise"
|
NoiseCodec* = "/noise"
|
||||||
|
|
||||||
PayloadString = "noise-libp2p-static-key:"
|
PayloadString = toBytes("noise-libp2p-static-key:")
|
||||||
|
|
||||||
ProtocolXXName = "Noise_XX_25519_ChaChaPoly_SHA256"
|
ProtocolXXName = "Noise_XX_25519_ChaChaPoly_SHA256"
|
||||||
|
|
||||||
@ -339,7 +339,6 @@ proc handshakeXXOutbound(
|
|||||||
hs = HandshakeState.init()
|
hs = HandshakeState.init()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
||||||
hs.ss.mixHash(p.commonPrologue)
|
hs.ss.mixHash(p.commonPrologue)
|
||||||
hs.s = p.noiseKeys
|
hs.s = p.noiseKeys
|
||||||
|
|
||||||
@ -445,7 +444,6 @@ method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
|
|||||||
dumpMessage(sconn, FlowDirection.Incoming, [])
|
dumpMessage(sconn, FlowDirection.Incoming, [])
|
||||||
trace "Received 0-length message", sconn
|
trace "Received 0-length message", sconn
|
||||||
|
|
||||||
|
|
||||||
proc encryptFrame(
|
proc encryptFrame(
|
||||||
sconn: NoiseConnection,
|
sconn: NoiseConnection,
|
||||||
cipherFrame: var openArray[byte],
|
cipherFrame: var openArray[byte],
|
||||||
@ -506,7 +504,7 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
|
|||||||
# sequencing issues
|
# sequencing issues
|
||||||
sconn.stream.write(cipherFrames)
|
sconn.stream.write(cipherFrames)
|
||||||
|
|
||||||
method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureConn] {.async.} =
|
method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
|
||||||
trace "Starting Noise handshake", conn, initiator
|
trace "Starting Noise handshake", conn, initiator
|
||||||
|
|
||||||
let timeout = conn.timeout
|
let timeout = conn.timeout
|
||||||
@ -515,7 +513,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
|
|||||||
# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
|
# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
|
||||||
let
|
let
|
||||||
signedPayload = p.localPrivateKey.sign(
|
signedPayload = p.localPrivateKey.sign(
|
||||||
PayloadString.toBytes & p.noiseKeys.publicKey.getBytes).tryGet()
|
PayloadString & p.noiseKeys.publicKey.getBytes).tryGet()
|
||||||
|
|
||||||
var
|
var
|
||||||
libp2pProof = initProtoBuffer()
|
libp2pProof = initProtoBuffer()
|
||||||
@ -538,11 +536,9 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
|
|||||||
remoteSig: Signature
|
remoteSig: Signature
|
||||||
remoteSigBytes: seq[byte]
|
remoteSigBytes: seq[byte]
|
||||||
|
|
||||||
let r1 = remoteProof.getField(1, remotePubKeyBytes)
|
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
|
||||||
let r2 = remoteProof.getField(2, remoteSigBytes)
|
|
||||||
if r1.isErr() or not(r1.get()):
|
|
||||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
|
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
|
||||||
if r2.isErr() or not(r2.get()):
|
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
|
||||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
|
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
|
||||||
|
|
||||||
if not remotePubKey.init(remotePubKeyBytes):
|
if not remotePubKey.init(remotePubKeyBytes):
|
||||||
@ -550,33 +546,34 @@ method handshake*(p: Noise, conn: Connection, initiator: bool): Future[SecureCon
|
|||||||
if not remoteSig.init(remoteSigBytes):
|
if not remoteSig.init(remoteSigBytes):
|
||||||
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
|
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
|
||||||
|
|
||||||
let verifyPayload = PayloadString.toBytes & handshakeRes.rs.getBytes
|
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
|
||||||
if not remoteSig.verify(verifyPayload, remotePubKey):
|
if not remoteSig.verify(verifyPayload, remotePubKey):
|
||||||
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
|
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
|
||||||
else:
|
else:
|
||||||
trace "Remote signature verified", conn
|
trace "Remote signature verified", conn
|
||||||
|
|
||||||
if initiator:
|
let pid = PeerId.init(remotePubKey).valueOr:
|
||||||
let pid = PeerId.init(remotePubKey)
|
raise newException(NoiseHandshakeError, "Invalid remote peer id: " & $error)
|
||||||
if not conn.peerId.validate():
|
|
||||||
raise newException(NoiseHandshakeError, "Failed to validate peerId.")
|
trace "Remote peer id", pid = $pid
|
||||||
if pid.isErr or pid.get() != conn.peerId:
|
|
||||||
|
if peerId.isSome():
|
||||||
|
let targetPid = peerId.get()
|
||||||
|
if not targetPid.validate():
|
||||||
|
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
|
||||||
|
|
||||||
|
if pid != targetPid:
|
||||||
var
|
var
|
||||||
failedKey: PublicKey
|
failedKey: PublicKey
|
||||||
discard extractPublicKey(conn.peerId, failedKey)
|
discard extractPublicKey(targetPid, failedKey)
|
||||||
debug "Noise handshake, peer infos don't match!",
|
debug "Noise handshake, peer id doesn't match!",
|
||||||
initiator, dealt_peer = conn,
|
initiator, dealt_peer = conn,
|
||||||
dealt_key = $failedKey, received_peer = $pid,
|
dealt_key = $failedKey, received_peer = $pid,
|
||||||
received_key = $remotePubKey
|
received_key = $remotePubKey
|
||||||
raise newException(NoiseHandshakeError, "Noise handshake, peer infos don't match! " & $pid & " != " & $conn.peerId)
|
raise newException(NoiseHandshakeError, "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
|
||||||
else:
|
conn.peerId = pid
|
||||||
let pid = PeerId.init(remotePubKey)
|
|
||||||
if pid.isErr:
|
|
||||||
raise newException(NoiseHandshakeError, "Invalid remote peer id")
|
|
||||||
conn.peerId = pid.get()
|
|
||||||
|
|
||||||
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
|
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
|
||||||
|
|
||||||
if initiator:
|
if initiator:
|
||||||
tmp.readCs = handshakeRes.cs2
|
tmp.readCs = handshakeRes.cs2
|
||||||
tmp.writeCs = handshakeRes.cs1
|
tmp.writeCs = handshakeRes.cs1
|
||||||
|
@ -291,7 +291,7 @@ proc transactMessage(conn: Connection,
|
|||||||
await conn.write(msg)
|
await conn.write(msg)
|
||||||
return await conn.readRawMessage()
|
return await conn.readRawMessage()
|
||||||
|
|
||||||
method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[SecureConn] {.async.} =
|
method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
|
||||||
var
|
var
|
||||||
localNonce: array[SecioNonceSize, byte]
|
localNonce: array[SecioNonceSize, byte]
|
||||||
remoteNonce: seq[byte]
|
remoteNonce: seq[byte]
|
||||||
@ -342,9 +342,14 @@ method handshake*(s: Secio, conn: Connection, initiator: bool = false): Future[S
|
|||||||
|
|
||||||
remotePeerId = PeerId.init(remotePubkey).tryGet()
|
remotePeerId = PeerId.init(remotePubkey).tryGet()
|
||||||
|
|
||||||
# TODO: PeerId check against supplied PeerId
|
if peerId.isSome():
|
||||||
if not initiator:
|
let targetPid = peerId.get()
|
||||||
conn.peerId = remotePeerId
|
if not targetPid.validate():
|
||||||
|
raise newException(SecioError, "Failed to validate expected peerId.")
|
||||||
|
|
||||||
|
if remotePeerId != targetPid:
|
||||||
|
raise newException(SecioError, "Peer ids don't match!")
|
||||||
|
conn.peerId = remotePeerId
|
||||||
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
|
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
|
||||||
remoteNonce).tryGet()
|
remoteNonce).tryGet()
|
||||||
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
|
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
|
||||||
|
@ -7,12 +7,14 @@
|
|||||||
# This file may not be copied, modified, or distributed except according to
|
# This file may not be copied, modified, or distributed except according to
|
||||||
# those terms.
|
# those terms.
|
||||||
|
|
||||||
|
{.push gcsafe.}
|
||||||
when (NimMajor, NimMinor) < (1, 4):
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
else:
|
else:
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[strformat]
|
import std/[strformat]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles
|
import chronos, chronicles
|
||||||
import ../protocol,
|
import ../protocol,
|
||||||
../../stream/streamseq,
|
../../stream/streamseq,
|
||||||
@ -21,7 +23,7 @@ import ../protocol,
|
|||||||
../../peerinfo,
|
../../peerinfo,
|
||||||
../../errors
|
../../errors
|
||||||
|
|
||||||
export protocol
|
export protocol, results
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p secure"
|
topics = "libp2p secure"
|
||||||
@ -48,7 +50,7 @@ chronicles.formatIt(SecureConn): shortLog(it)
|
|||||||
proc new*(T: type SecureConn,
|
proc new*(T: type SecureConn,
|
||||||
conn: Connection,
|
conn: Connection,
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
observedAddr: MultiAddress,
|
observedAddr: Opt[MultiAddress],
|
||||||
timeout: Duration = DefaultConnectionTimeout): T =
|
timeout: Duration = DefaultConnectionTimeout): T =
|
||||||
result = T(stream: conn,
|
result = T(stream: conn,
|
||||||
peerId: peerId,
|
peerId: peerId,
|
||||||
@ -79,13 +81,15 @@ method getWrapped*(s: SecureConn): Connection = s.stream
|
|||||||
|
|
||||||
method handshake*(s: Secure,
|
method handshake*(s: Secure,
|
||||||
conn: Connection,
|
conn: Connection,
|
||||||
initiator: bool): Future[SecureConn] {.async, base.} =
|
initiator: bool,
|
||||||
|
peerId: Opt[PeerId]): Future[SecureConn] {.async, base.} =
|
||||||
doAssert(false, "Not implemented!")
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
proc handleConn(s: Secure,
|
proc handleConn(s: Secure,
|
||||||
conn: Connection,
|
conn: Connection,
|
||||||
initiator: bool): Future[Connection] {.async.} =
|
initiator: bool,
|
||||||
var sconn = await s.handshake(conn, initiator)
|
peerId: Opt[PeerId]): Future[Connection] {.async.} =
|
||||||
|
var sconn = await s.handshake(conn, initiator, peerId)
|
||||||
# mark connection bottom level transport direction
|
# mark connection bottom level transport direction
|
||||||
# this is the safest place to do this
|
# this is the safest place to do this
|
||||||
# we require this information in for example gossipsub
|
# we require this information in for example gossipsub
|
||||||
@ -121,7 +125,7 @@ method init*(s: Secure) =
|
|||||||
try:
|
try:
|
||||||
# We don't need the result but we
|
# We don't need the result but we
|
||||||
# definitely need to await the handshake
|
# definitely need to await the handshake
|
||||||
discard await s.handleConn(conn, false)
|
discard await s.handleConn(conn, false, Opt.none(PeerId))
|
||||||
trace "connection secured", conn
|
trace "connection secured", conn
|
||||||
except CancelledError as exc:
|
except CancelledError as exc:
|
||||||
warn "securing connection canceled", conn
|
warn "securing connection canceled", conn
|
||||||
@ -135,9 +139,10 @@ method init*(s: Secure) =
|
|||||||
|
|
||||||
method secure*(s: Secure,
|
method secure*(s: Secure,
|
||||||
conn: Connection,
|
conn: Connection,
|
||||||
initiator: bool):
|
initiator: bool,
|
||||||
|
peerId: Opt[PeerId]):
|
||||||
Future[Connection] {.base.} =
|
Future[Connection] {.base.} =
|
||||||
s.handleConn(conn, initiator)
|
s.handleConn(conn, initiator, peerId)
|
||||||
|
|
||||||
method readOnce*(s: SecureConn,
|
method readOnce*(s: SecureConn,
|
||||||
pbytes: pointer,
|
pbytes: pointer,
|
||||||
|
159
libp2p/services/autonatservice.nim
Normal file
159
libp2p/services/autonatservice.nim
Normal file
@ -0,0 +1,159 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import std/[options, deques, sequtils]
|
||||||
|
import chronos, metrics
|
||||||
|
import ../switch
|
||||||
|
import ../protocols/[connectivity/autonat]
|
||||||
|
import ../utils/heartbeat
|
||||||
|
import ../crypto/crypto
|
||||||
|
|
||||||
|
declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability confidence", labels = ["reachability"])
|
||||||
|
|
||||||
|
type
|
||||||
|
AutonatService* = ref object of Service
|
||||||
|
newConnectedPeerHandler: PeerEventHandler
|
||||||
|
scheduleHandle: Future[void]
|
||||||
|
networkReachability: NetworkReachability
|
||||||
|
confidence: Option[float]
|
||||||
|
answers: Deque[NetworkReachability]
|
||||||
|
autonat: Autonat
|
||||||
|
statusAndConfidenceHandler: StatusAndConfidenceHandler
|
||||||
|
rng: ref HmacDrbgContext
|
||||||
|
scheduleInterval: Option[Duration]
|
||||||
|
askNewConnectedPeers: bool
|
||||||
|
numPeersToAsk: int
|
||||||
|
maxQueueSize: int
|
||||||
|
minConfidence: float
|
||||||
|
dialTimeout: Duration
|
||||||
|
|
||||||
|
NetworkReachability* {.pure.} = enum
|
||||||
|
NotReachable, Reachable, Unknown
|
||||||
|
|
||||||
|
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
|
||||||
|
|
||||||
|
proc new*(
|
||||||
|
T: typedesc[AutonatService],
|
||||||
|
autonat: Autonat,
|
||||||
|
rng: ref HmacDrbgContext,
|
||||||
|
scheduleInterval: Option[Duration] = none(Duration),
|
||||||
|
askNewConnectedPeers = true,
|
||||||
|
numPeersToAsk: int = 5,
|
||||||
|
maxQueueSize: int = 10,
|
||||||
|
minConfidence: float = 0.3,
|
||||||
|
dialTimeout = 30.seconds): T =
|
||||||
|
return T(
|
||||||
|
scheduleInterval: scheduleInterval,
|
||||||
|
networkReachability: Unknown,
|
||||||
|
confidence: none(float),
|
||||||
|
answers: initDeque[NetworkReachability](),
|
||||||
|
autonat: autonat,
|
||||||
|
rng: rng,
|
||||||
|
askNewConnectedPeers: askNewConnectedPeers,
|
||||||
|
numPeersToAsk: numPeersToAsk,
|
||||||
|
maxQueueSize: maxQueueSize,
|
||||||
|
minConfidence: minConfidence,
|
||||||
|
dialTimeout: dialTimeout)
|
||||||
|
|
||||||
|
proc networkReachability*(self: AutonatService): NetworkReachability {.inline.} =
|
||||||
|
return self.networkReachability
|
||||||
|
|
||||||
|
proc callHandler(self: AutonatService) {.async.} =
|
||||||
|
if not isNil(self.statusAndConfidenceHandler):
|
||||||
|
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
|
||||||
|
|
||||||
|
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
|
||||||
|
|
||||||
|
if ans == Unknown:
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.answers.len == self.maxQueueSize:
|
||||||
|
self.answers.popFirst()
|
||||||
|
self.answers.addLast(ans)
|
||||||
|
|
||||||
|
self.networkReachability = Unknown
|
||||||
|
self.confidence = none(float)
|
||||||
|
const reachabilityPriority = [Reachable, NotReachable]
|
||||||
|
for reachability in reachabilityPriority:
|
||||||
|
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
|
||||||
|
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
|
||||||
|
if self.confidence.isNone and confidence >= self.minConfidence:
|
||||||
|
self.networkReachability = reachability
|
||||||
|
self.confidence = some(confidence)
|
||||||
|
|
||||||
|
trace "Current status", currentStats = $self.networkReachability, confidence = $self.confidence
|
||||||
|
|
||||||
|
proc askPeer(self: AutonatService, s: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
|
||||||
|
trace "Asking for reachability", peerId = $peerId
|
||||||
|
let ans =
|
||||||
|
try:
|
||||||
|
discard await self.autonat.dialMe(peerId).wait(self.dialTimeout)
|
||||||
|
Reachable
|
||||||
|
except AutonatUnreachableError:
|
||||||
|
trace "dialMe answer is not reachable", peerId = $peerId
|
||||||
|
NotReachable
|
||||||
|
except AsyncTimeoutError:
|
||||||
|
trace "dialMe timed out", peerId = $peerId
|
||||||
|
Unknown
|
||||||
|
except CatchableError as err:
|
||||||
|
trace "dialMe unexpected error", peerId = $peerId, errMsg = $err.msg
|
||||||
|
Unknown
|
||||||
|
await self.handleAnswer(ans)
|
||||||
|
if not isNil(self.statusAndConfidenceHandler):
|
||||||
|
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
|
||||||
|
return ans
|
||||||
|
|
||||||
|
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
|
||||||
|
var peers = switch.connectedPeers(Direction.Out)
|
||||||
|
self.rng.shuffle(peers)
|
||||||
|
var answersFromPeers = 0
|
||||||
|
for peer in peers:
|
||||||
|
if answersFromPeers >= self.numPeersToAsk:
|
||||||
|
break
|
||||||
|
elif (await askPeer(self, switch, peer)) != Unknown:
|
||||||
|
answersFromPeers.inc()
|
||||||
|
|
||||||
|
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
|
||||||
|
heartbeat "Schedule AutonatService run", interval:
|
||||||
|
await service.run(switch)
|
||||||
|
|
||||||
|
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
||||||
|
let hasBeenSetup = await procCall Service(self).setup(switch)
|
||||||
|
if hasBeenSetup:
|
||||||
|
if self.askNewConnectedPeers:
|
||||||
|
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
|
||||||
|
discard askPeer(self, switch, peerId)
|
||||||
|
await self.callHandler()
|
||||||
|
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
|
||||||
|
if self.scheduleInterval.isSome():
|
||||||
|
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
|
||||||
|
return hasBeenSetup
|
||||||
|
|
||||||
|
method run*(self: AutonatService, switch: Switch) {.async, public.} =
|
||||||
|
await askConnectedPeers(self, switch)
|
||||||
|
await self.callHandler()
|
||||||
|
|
||||||
|
|
||||||
|
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
|
||||||
|
let hasBeenStopped = await procCall Service(self).stop(switch)
|
||||||
|
if hasBeenStopped:
|
||||||
|
if not isNil(self.scheduleHandle):
|
||||||
|
self.scheduleHandle.cancel()
|
||||||
|
self.scheduleHandle = nil
|
||||||
|
if not isNil(self.newConnectedPeerHandler):
|
||||||
|
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
|
||||||
|
return hasBeenStopped
|
||||||
|
|
||||||
|
proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
|
||||||
|
self.statusAndConfidenceHandler = statusAndConfidenceHandler
|
@ -79,7 +79,7 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
|
|||||||
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
&"Only one concurrent push allowed for stream {s.shortLog()}")
|
||||||
|
|
||||||
if s.isClosed or s.pushedEof:
|
if s.isClosed or s.pushedEof:
|
||||||
raise newLPStreamEOFError()
|
raise newLPStreamClosedError()
|
||||||
|
|
||||||
if data.len == 0:
|
if data.len == 0:
|
||||||
return # Don't push 0-length buffers, these signal EOF
|
return # Don't push 0-length buffers, these signal EOF
|
||||||
|
@ -13,10 +13,13 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[oids, strformat]
|
import std/[oids, strformat]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles, metrics
|
import chronos, chronicles, metrics
|
||||||
import connection
|
import connection
|
||||||
import ../utility
|
import ../utility
|
||||||
|
|
||||||
|
export results
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p chronosstream"
|
topics = "libp2p chronosstream"
|
||||||
|
|
||||||
@ -60,7 +63,7 @@ proc init*(C: type ChronosStream,
|
|||||||
client: StreamTransport,
|
client: StreamTransport,
|
||||||
dir: Direction,
|
dir: Direction,
|
||||||
timeout = DefaultChronosStreamTimeout,
|
timeout = DefaultChronosStreamTimeout,
|
||||||
observedAddr: MultiAddress = MultiAddress()): ChronosStream =
|
observedAddr: Opt[MultiAddress]): ChronosStream =
|
||||||
result = C(client: client,
|
result = C(client: client,
|
||||||
timeout: timeout,
|
timeout: timeout,
|
||||||
dir: dir,
|
dir: dir,
|
||||||
@ -127,6 +130,9 @@ proc completeWrite(
|
|||||||
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
|
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
|
||||||
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
|
||||||
# drives up memory usage
|
# drives up memory usage
|
||||||
|
if msg.len == 0:
|
||||||
|
trace "Empty byte seq, nothing to write"
|
||||||
|
return
|
||||||
if s.closed:
|
if s.closed:
|
||||||
let fut = newFuture[void]("chronosstream.write.closed")
|
let fut = newFuture[void]("chronosstream.write.closed")
|
||||||
fut.fail(newLPStreamClosedError())
|
fut.fail(newLPStreamClosedError())
|
||||||
|
@ -13,13 +13,14 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[hashes, oids, strformat]
|
import std/[hashes, oids, strformat]
|
||||||
|
import stew/results
|
||||||
import chronicles, chronos, metrics
|
import chronicles, chronos, metrics
|
||||||
import lpstream,
|
import lpstream,
|
||||||
../multiaddress,
|
../multiaddress,
|
||||||
../peerinfo,
|
../peerinfo,
|
||||||
../errors
|
../errors
|
||||||
|
|
||||||
export lpstream, peerinfo, errors
|
export lpstream, peerinfo, errors, results
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p connection"
|
topics = "libp2p connection"
|
||||||
@ -37,7 +38,7 @@ type
|
|||||||
timerTaskFut: Future[void] # the current timer instance
|
timerTaskFut: Future[void] # the current timer instance
|
||||||
timeoutHandler*: TimeoutHandler # timeout handler
|
timeoutHandler*: TimeoutHandler # timeout handler
|
||||||
peerId*: PeerId
|
peerId*: PeerId
|
||||||
observedAddr*: MultiAddress
|
observedAddr*: Opt[MultiAddress]
|
||||||
upgraded*: Future[void]
|
upgraded*: Future[void]
|
||||||
protocol*: string # protocol used by the connection, used as tag for metrics
|
protocol*: string # protocol used by the connection, used as tag for metrics
|
||||||
transportDir*: Direction # The bottom level transport (generally the socket) direction
|
transportDir*: Direction # The bottom level transport (generally the socket) direction
|
||||||
@ -160,9 +161,9 @@ method getWrapped*(s: Connection): Connection {.base.} =
|
|||||||
proc new*(C: type Connection,
|
proc new*(C: type Connection,
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
dir: Direction,
|
dir: Direction,
|
||||||
|
observedAddr: Opt[MultiAddress],
|
||||||
timeout: Duration = DefaultConnectionTimeout,
|
timeout: Duration = DefaultConnectionTimeout,
|
||||||
timeoutHandler: TimeoutHandler = nil,
|
timeoutHandler: TimeoutHandler = nil): Connection =
|
||||||
observedAddr: MultiAddress = MultiAddress()): Connection =
|
|
||||||
result = C(peerId: peerId,
|
result = C(peerId: peerId,
|
||||||
dir: dir,
|
dir: dir,
|
||||||
timeout: timeout,
|
timeout: timeout,
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
|
|
||||||
## Length Prefixed stream implementation
|
## Length Prefixed stream implementation
|
||||||
|
|
||||||
|
{.push gcsafe.}
|
||||||
when (NimMajor, NimMinor) < (1, 4):
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
else:
|
else:
|
||||||
@ -59,7 +60,18 @@ type
|
|||||||
LPStreamWriteError* = object of LPStreamError
|
LPStreamWriteError* = object of LPStreamError
|
||||||
par*: ref CatchableError
|
par*: ref CatchableError
|
||||||
LPStreamEOFError* = object of LPStreamError
|
LPStreamEOFError* = object of LPStreamError
|
||||||
LPStreamClosedError* = object of LPStreamError
|
|
||||||
|
# X | Read | Write
|
||||||
|
# Local close | Works | LPStreamClosedError
|
||||||
|
# Remote close | LPStreamRemoteClosedError | Works
|
||||||
|
# Local reset | LPStreamClosedError | LPStreamClosedError
|
||||||
|
# Remote reset | LPStreamResetError | LPStreamResetError
|
||||||
|
# Connection down | LPStreamConnDown | LPStreamConnDownError
|
||||||
|
|
||||||
|
LPStreamResetError* = object of LPStreamEOFError
|
||||||
|
LPStreamClosedError* = object of LPStreamEOFError
|
||||||
|
LPStreamRemoteClosedError* = object of LPStreamEOFError
|
||||||
|
LPStreamConnDownError* = object of LPStreamEOFError
|
||||||
|
|
||||||
InvalidVarintError* = object of LPStreamError
|
InvalidVarintError* = object of LPStreamError
|
||||||
MaxSizeError* = object of LPStreamError
|
MaxSizeError* = object of LPStreamError
|
||||||
@ -68,7 +80,7 @@ type
|
|||||||
opened*: uint64
|
opened*: uint64
|
||||||
closed*: uint64
|
closed*: uint64
|
||||||
|
|
||||||
proc setupStreamTracker(name: string): StreamTracker =
|
proc setupStreamTracker*(name: string): StreamTracker =
|
||||||
let tracker = new StreamTracker
|
let tracker = new StreamTracker
|
||||||
|
|
||||||
proc dumpTracking(): string {.gcsafe.} =
|
proc dumpTracking(): string {.gcsafe.} =
|
||||||
@ -119,9 +131,22 @@ proc newLPStreamIncorrectDefect*(m: string): ref LPStreamIncorrectDefect =
|
|||||||
proc newLPStreamEOFError*(): ref LPStreamEOFError =
|
proc newLPStreamEOFError*(): ref LPStreamEOFError =
|
||||||
result = newException(LPStreamEOFError, "Stream EOF!")
|
result = newException(LPStreamEOFError, "Stream EOF!")
|
||||||
|
|
||||||
|
proc newLPStreamResetError*(): ref LPStreamResetError =
|
||||||
|
result = newException(LPStreamResetError, "Stream Reset!")
|
||||||
|
|
||||||
proc newLPStreamClosedError*(): ref LPStreamClosedError =
|
proc newLPStreamClosedError*(): ref LPStreamClosedError =
|
||||||
result = newException(LPStreamClosedError, "Stream Closed!")
|
result = newException(LPStreamClosedError, "Stream Closed!")
|
||||||
|
|
||||||
|
proc newLPStreamRemoteClosedError*(): ref LPStreamRemoteClosedError =
|
||||||
|
result = newException(LPStreamRemoteClosedError, "Stream Remotely Closed!")
|
||||||
|
|
||||||
|
proc newLPStreamConnDownError*(
|
||||||
|
parentException: ref Exception = nil): ref LPStreamConnDownError =
|
||||||
|
result = newException(
|
||||||
|
LPStreamConnDownError,
|
||||||
|
"Stream Underlying Connection Closed!",
|
||||||
|
parentException)
|
||||||
|
|
||||||
func shortLog*(s: LPStream): auto =
|
func shortLog*(s: LPStream): auto =
|
||||||
if s.isNil: "LPStream(nil)"
|
if s.isNil: "LPStream(nil)"
|
||||||
else: $s.oid
|
else: $s.oid
|
||||||
@ -165,6 +190,8 @@ proc readExactly*(s: LPStream,
|
|||||||
## Waits for `nbytes` to be available, then read
|
## Waits for `nbytes` to be available, then read
|
||||||
## them and return them
|
## them and return them
|
||||||
if s.atEof:
|
if s.atEof:
|
||||||
|
var ch: char
|
||||||
|
discard await s.readOnce(addr ch, 1)
|
||||||
raise newLPStreamEOFError()
|
raise newLPStreamEOFError()
|
||||||
|
|
||||||
if nbytes == 0:
|
if nbytes == 0:
|
||||||
@ -183,6 +210,10 @@ proc readExactly*(s: LPStream,
|
|||||||
if read == 0:
|
if read == 0:
|
||||||
doAssert s.atEof()
|
doAssert s.atEof()
|
||||||
trace "couldn't read all bytes, stream EOF", s, nbytes, read
|
trace "couldn't read all bytes, stream EOF", s, nbytes, read
|
||||||
|
# Re-readOnce to raise a more specific error than EOF
|
||||||
|
# Raise EOF if it doesn't raise anything(shouldn't happen)
|
||||||
|
discard await s.readOnce(addr pbuffer[read], nbytes - read)
|
||||||
|
warn "Read twice while at EOF"
|
||||||
raise newLPStreamEOFError()
|
raise newLPStreamEOFError()
|
||||||
|
|
||||||
if read < nbytes:
|
if read < nbytes:
|
||||||
@ -200,8 +231,7 @@ proc readLine*(s: LPStream,
|
|||||||
|
|
||||||
while true:
|
while true:
|
||||||
var ch: char
|
var ch: char
|
||||||
if (await readOnce(s, addr ch, 1)) == 0:
|
await readExactly(s, addr ch, 1)
|
||||||
raise newLPStreamEOFError()
|
|
||||||
|
|
||||||
if sep[state] == ch:
|
if sep[state] == ch:
|
||||||
inc(state)
|
inc(state)
|
||||||
@ -224,8 +254,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
|
|||||||
buffer: array[10, byte]
|
buffer: array[10, byte]
|
||||||
|
|
||||||
for i in 0..<len(buffer):
|
for i in 0..<len(buffer):
|
||||||
if (await conn.readOnce(addr buffer[i], 1)) == 0:
|
await conn.readExactly(addr buffer[i], 1)
|
||||||
raise newLPStreamEOFError()
|
|
||||||
|
|
||||||
var
|
var
|
||||||
varint: uint64
|
varint: uint64
|
||||||
|
@ -74,6 +74,28 @@ type
|
|||||||
peerStore*: PeerStore
|
peerStore*: PeerStore
|
||||||
nameResolver*: NameResolver
|
nameResolver*: NameResolver
|
||||||
started: bool
|
started: bool
|
||||||
|
services*: seq[Service]
|
||||||
|
|
||||||
|
Service* = ref object of RootObj
|
||||||
|
inUse: bool
|
||||||
|
|
||||||
|
|
||||||
|
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||||
|
if self.inUse:
|
||||||
|
warn "service setup has already been called"
|
||||||
|
return false
|
||||||
|
self.inUse = true
|
||||||
|
return true
|
||||||
|
|
||||||
|
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
|
||||||
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
|
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
|
||||||
|
if not self.inUse:
|
||||||
|
warn "service is already stopped"
|
||||||
|
return false
|
||||||
|
self.inUse = false
|
||||||
|
return true
|
||||||
|
|
||||||
proc addConnEventHandler*(s: Switch,
|
proc addConnEventHandler*(s: Switch,
|
||||||
handler: ConnEventHandler,
|
handler: ConnEventHandler,
|
||||||
@ -108,6 +130,9 @@ method addTransport*(s: Switch, t: Transport) =
|
|||||||
s.transports &= t
|
s.transports &= t
|
||||||
s.dialer.addTransport(t)
|
s.dialer.addTransport(t)
|
||||||
|
|
||||||
|
proc connectedPeers*(s: Switch, dir: Direction): seq[PeerId] =
|
||||||
|
s.connManager.connectedPeers(dir)
|
||||||
|
|
||||||
proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} =
|
proc isConnected*(s: Switch, peerId: PeerId): bool {.public.} =
|
||||||
## returns true if the peer has one or more
|
## returns true if the peer has one or more
|
||||||
## associated connections
|
## associated connections
|
||||||
@ -128,6 +153,18 @@ method connect*(
|
|||||||
|
|
||||||
s.dialer.connect(peerId, addrs, forceDial)
|
s.dialer.connect(peerId, addrs, forceDial)
|
||||||
|
|
||||||
|
method connect*(
|
||||||
|
s: Switch,
|
||||||
|
address: MultiAddress,
|
||||||
|
allowUnknownPeerId = false): Future[PeerId] =
|
||||||
|
## Connects to a peer and retrieve its PeerId
|
||||||
|
##
|
||||||
|
## If the P2P part is missing from the MA and `allowUnknownPeerId` is set
|
||||||
|
## to true, this will discover the PeerId while connecting. This exposes
|
||||||
|
## you to MiTM attacks, so it shouldn't be used without care!
|
||||||
|
|
||||||
|
s.dialer.connect(address, allowUnknownPeerId)
|
||||||
|
|
||||||
method dial*(
|
method dial*(
|
||||||
s: Switch,
|
s: Switch,
|
||||||
peerId: PeerId,
|
peerId: PeerId,
|
||||||
@ -282,6 +319,9 @@ proc stop*(s: Switch) {.async, public.} =
|
|||||||
if not a.finished:
|
if not a.finished:
|
||||||
a.cancel()
|
a.cancel()
|
||||||
|
|
||||||
|
for service in s.services:
|
||||||
|
discard await service.stop(s)
|
||||||
|
|
||||||
await s.ms.stop()
|
await s.ms.stop()
|
||||||
|
|
||||||
trace "Switch stopped"
|
trace "Switch stopped"
|
||||||
@ -289,14 +329,18 @@ proc stop*(s: Switch) {.async, public.} =
|
|||||||
proc start*(s: Switch) {.async, gcsafe, public.} =
|
proc start*(s: Switch) {.async, gcsafe, public.} =
|
||||||
## Start listening on every transport
|
## Start listening on every transport
|
||||||
|
|
||||||
|
if s.started:
|
||||||
|
warn "Switch has already been started"
|
||||||
|
return
|
||||||
|
|
||||||
trace "starting switch for peer", peerInfo = s.peerInfo
|
trace "starting switch for peer", peerInfo = s.peerInfo
|
||||||
var startFuts: seq[Future[void]]
|
var startFuts: seq[Future[void]]
|
||||||
for t in s.transports:
|
for t in s.transports:
|
||||||
let addrs = s.peerInfo.addrs.filterIt(
|
let addrs = s.peerInfo.listenAddrs.filterIt(
|
||||||
t.handles(it)
|
t.handles(it)
|
||||||
)
|
)
|
||||||
|
|
||||||
s.peerInfo.addrs.keepItIf(
|
s.peerInfo.listenAddrs.keepItIf(
|
||||||
it notin addrs
|
it notin addrs
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -305,22 +349,23 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
|
|||||||
|
|
||||||
await allFutures(startFuts)
|
await allFutures(startFuts)
|
||||||
|
|
||||||
for s in startFuts:
|
for fut in startFuts:
|
||||||
if s.failed:
|
if fut.failed:
|
||||||
# TODO: replace this exception with a `listenError` callback. See
|
await s.stop()
|
||||||
# https://github.com/status-im/nim-libp2p/pull/662 for more info.
|
raise fut.error
|
||||||
raise newException(transport.TransportError,
|
|
||||||
"Failed to start one transport", s.error)
|
|
||||||
|
|
||||||
for t in s.transports: # for each transport
|
for t in s.transports: # for each transport
|
||||||
if t.addrs.len > 0 or t.running:
|
if t.addrs.len > 0 or t.running:
|
||||||
s.acceptFuts.add(s.accept(t))
|
s.acceptFuts.add(s.accept(t))
|
||||||
s.peerInfo.addrs &= t.addrs
|
s.peerInfo.listenAddrs &= t.addrs
|
||||||
|
|
||||||
s.peerInfo.update()
|
await s.peerInfo.update()
|
||||||
|
|
||||||
await s.ms.start()
|
await s.ms.start()
|
||||||
|
|
||||||
|
for service in s.services:
|
||||||
|
discard await service.setup(s)
|
||||||
|
|
||||||
s.started = true
|
s.started = true
|
||||||
|
|
||||||
debug "Started libp2p node", peer = s.peerInfo
|
debug "Started libp2p node", peer = s.peerInfo
|
||||||
@ -332,7 +377,8 @@ proc newSwitch*(peerInfo: PeerInfo,
|
|||||||
connManager: ConnManager,
|
connManager: ConnManager,
|
||||||
ms: MultistreamSelect,
|
ms: MultistreamSelect,
|
||||||
nameResolver: NameResolver = nil,
|
nameResolver: NameResolver = nil,
|
||||||
peerStore = PeerStore.new()): Switch
|
peerStore = PeerStore.new(),
|
||||||
|
services = newSeq[Service]()): Switch
|
||||||
{.raises: [Defect, LPError], public.} =
|
{.raises: [Defect, LPError], public.} =
|
||||||
if secureManagers.len == 0:
|
if secureManagers.len == 0:
|
||||||
raise newException(LPError, "Provide at least one secure manager")
|
raise newException(LPError, "Provide at least one secure manager")
|
||||||
@ -344,8 +390,10 @@ proc newSwitch*(peerInfo: PeerInfo,
|
|||||||
connManager: connManager,
|
connManager: connManager,
|
||||||
peerStore: peerStore,
|
peerStore: peerStore,
|
||||||
dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
|
dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
|
||||||
nameResolver: nameResolver)
|
nameResolver: nameResolver,
|
||||||
|
services: services)
|
||||||
|
|
||||||
switch.connManager.peerStore = peerStore
|
switch.connManager.peerStore = peerStore
|
||||||
switch.mount(identity)
|
switch.mount(identity)
|
||||||
|
|
||||||
return switch
|
return switch
|
||||||
|
@ -15,6 +15,7 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[oids, sequtils]
|
import std/[oids, sequtils]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles
|
import chronos, chronicles
|
||||||
import transport,
|
import transport,
|
||||||
../errors,
|
../errors,
|
||||||
@ -31,7 +32,7 @@ import transport,
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p tcptransport"
|
topics = "libp2p tcptransport"
|
||||||
|
|
||||||
export transport
|
export transport, results
|
||||||
|
|
||||||
const
|
const
|
||||||
TcpTransportTrackerName* = "libp2p.tcptransport"
|
TcpTransportTrackerName* = "libp2p.tcptransport"
|
||||||
@ -71,18 +72,20 @@ proc setupTcpTransportTracker(): TcpTransportTracker =
|
|||||||
result.isLeaked = leakTransport
|
result.isLeaked = leakTransport
|
||||||
addTracker(TcpTransportTrackerName, result)
|
addTracker(TcpTransportTrackerName, result)
|
||||||
|
|
||||||
proc connHandler*(self: TcpTransport,
|
proc getObservedAddr(client: StreamTransport): Future[MultiAddress] {.async.} =
|
||||||
client: StreamTransport,
|
|
||||||
dir: Direction): Future[Connection] {.async.} =
|
|
||||||
var observedAddr: MultiAddress = MultiAddress()
|
|
||||||
try:
|
try:
|
||||||
observedAddr = MultiAddress.init(client.remoteAddress).tryGet()
|
return MultiAddress.init(client.remoteAddress).tryGet()
|
||||||
except CatchableError as exc:
|
except CatchableError as exc:
|
||||||
trace "Failed to create observedAddr", exc = exc.msg
|
trace "Failed to create observedAddr", exc = exc.msg
|
||||||
if not(isNil(client) and client.closed):
|
if not(isNil(client) and client.closed):
|
||||||
await client.closeWait()
|
await client.closeWait()
|
||||||
raise exc
|
raise exc
|
||||||
|
|
||||||
|
proc connHandler*(self: TcpTransport,
|
||||||
|
client: StreamTransport,
|
||||||
|
observedAddr: Opt[MultiAddress],
|
||||||
|
dir: Direction): Future[Connection] {.async.} =
|
||||||
|
|
||||||
trace "Handling tcp connection", address = $observedAddr,
|
trace "Handling tcp connection", address = $observedAddr,
|
||||||
dir = $dir,
|
dir = $dir,
|
||||||
clients = self.clients[Direction.In].len +
|
clients = self.clients[Direction.In].len +
|
||||||
@ -222,7 +225,8 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
|
|||||||
self.acceptFuts[index] = self.servers[index].accept()
|
self.acceptFuts[index] = self.servers[index].accept()
|
||||||
|
|
||||||
let transp = await finished
|
let transp = await finished
|
||||||
return await self.connHandler(transp, Direction.In)
|
let observedAddr = await getObservedAddr(transp)
|
||||||
|
return await self.connHandler(transp, Opt.some(observedAddr), Direction.In)
|
||||||
except TransportOsError as exc:
|
except TransportOsError as exc:
|
||||||
# TODO: it doesn't sound like all OS errors
|
# TODO: it doesn't sound like all OS errors
|
||||||
# can be ignored, we should re-raise those
|
# can be ignored, we should re-raise those
|
||||||
@ -250,7 +254,8 @@ method dial*(
|
|||||||
|
|
||||||
let transp = await connect(address)
|
let transp = await connect(address)
|
||||||
try:
|
try:
|
||||||
return await self.connHandler(transp, Direction.Out)
|
let observedAddr = await getObservedAddr(transp)
|
||||||
|
return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
|
||||||
except CatchableError as err:
|
except CatchableError as err:
|
||||||
await transp.closeWait()
|
await transp.closeWait()
|
||||||
raise err
|
raise err
|
||||||
|
281
libp2p/transports/tortransport.nim
Normal file
281
libp2p/transports/tortransport.nim
Normal file
@ -0,0 +1,281 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
## Tor transport implementation
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import std/strformat
|
||||||
|
import chronos, chronicles, strutils
|
||||||
|
import stew/[byteutils, endians2, results, objects]
|
||||||
|
import ../multicodec
|
||||||
|
import transport,
|
||||||
|
tcptransport,
|
||||||
|
../switch,
|
||||||
|
../builders,
|
||||||
|
../stream/[lpstream, connection, chronosstream],
|
||||||
|
../multiaddress,
|
||||||
|
../upgrademngrs/upgrade
|
||||||
|
|
||||||
|
const
|
||||||
|
IPTcp = mapAnd(IP, mapEq("tcp"))
|
||||||
|
IPv4Tcp = mapAnd(IP4, mapEq("tcp"))
|
||||||
|
IPv6Tcp = mapAnd(IP6, mapEq("tcp"))
|
||||||
|
DnsTcp = mapAnd(DNSANY, mapEq("tcp"))
|
||||||
|
|
||||||
|
Socks5ProtocolVersion = byte(5)
|
||||||
|
NMethods = byte(1)
|
||||||
|
|
||||||
|
type
|
||||||
|
TorTransport* = ref object of Transport
|
||||||
|
transportAddress: TransportAddress
|
||||||
|
tcpTransport: TcpTransport
|
||||||
|
|
||||||
|
Socks5AuthMethod* {.pure.} = enum
|
||||||
|
NoAuth = 0
|
||||||
|
GSSAPI = 1
|
||||||
|
UsernamePassword = 2
|
||||||
|
NoAcceptableMethod = 0xff
|
||||||
|
|
||||||
|
Socks5RequestCommand* {.pure.} = enum
|
||||||
|
Connect = 1, Bind = 2, UdpAssoc = 3
|
||||||
|
|
||||||
|
Socks5AddressType* {.pure.} = enum
|
||||||
|
IPv4 = 1, FQDN = 3, IPv6 = 4
|
||||||
|
|
||||||
|
Socks5ReplyType* {.pure.} = enum
|
||||||
|
Succeeded = (0, "Succeeded"), ServerFailure = (1, "Server Failure"),
|
||||||
|
ConnectionNotAllowed = (2, "Connection Not Allowed"), NetworkUnreachable = (3, "Network Unreachable"),
|
||||||
|
HostUnreachable = (4, "Host Unreachable"), ConnectionRefused = (5, "Connection Refused"),
|
||||||
|
TtlExpired = (6, "Ttl Expired"), CommandNotSupported = (7, "Command Not Supported"),
|
||||||
|
AddressTypeNotSupported = (8, "Address Type Not Supported")
|
||||||
|
|
||||||
|
TransportStartError* = object of transport.TransportError
|
||||||
|
|
||||||
|
Socks5Error* = object of CatchableError
|
||||||
|
Socks5AuthFailedError* = object of Socks5Error
|
||||||
|
Socks5VersionError* = object of Socks5Error
|
||||||
|
Socks5ServerReplyError* = object of Socks5Error
|
||||||
|
|
||||||
|
proc new*(
|
||||||
|
T: typedesc[TorTransport],
|
||||||
|
transportAddress: TransportAddress,
|
||||||
|
flags: set[ServerFlags] = {},
|
||||||
|
upgrade: Upgrade): T {.public.} =
|
||||||
|
## Creates a Tor transport
|
||||||
|
|
||||||
|
T(
|
||||||
|
transportAddress: transportAddress,
|
||||||
|
upgrader: upgrade,
|
||||||
|
tcpTransport: TcpTransport.new(flags, upgrade))
|
||||||
|
|
||||||
|
proc handlesDial(address: MultiAddress): bool {.gcsafe.} =
|
||||||
|
return Onion3.match(address) or TCP.match(address) or DNSANY.match(address)
|
||||||
|
|
||||||
|
proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
|
||||||
|
return TcpOnion3.match(address)
|
||||||
|
|
||||||
|
proc connectToTorServer(
|
||||||
|
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
|
||||||
|
let transp = await connect(transportAddress)
|
||||||
|
try:
|
||||||
|
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
|
||||||
|
let
|
||||||
|
serverReply = await transp.read(2)
|
||||||
|
socks5ProtocolVersion = serverReply[0]
|
||||||
|
serverSelectedMethod = serverReply[1]
|
||||||
|
if socks5ProtocolVersion != Socks5ProtocolVersion:
|
||||||
|
raise newException(Socks5VersionError, "Unsupported socks version")
|
||||||
|
if serverSelectedMethod != Socks5AuthMethod.NoAuth.byte:
|
||||||
|
raise newException(Socks5AuthFailedError, "Unsupported auth method")
|
||||||
|
return transp
|
||||||
|
except CatchableError as err:
|
||||||
|
await transp.closeWait()
|
||||||
|
raise err
|
||||||
|
|
||||||
|
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
|
||||||
|
## The specification for this code is defined on
|
||||||
|
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
|
||||||
|
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
|
||||||
|
let
|
||||||
|
portNumOctets = 2
|
||||||
|
ipV4NumOctets = 4
|
||||||
|
ipV6NumOctets = 16
|
||||||
|
firstFourOctets = await transp.read(4)
|
||||||
|
socks5ProtocolVersion = firstFourOctets[0]
|
||||||
|
serverReply = firstFourOctets[1]
|
||||||
|
if socks5ProtocolVersion != Socks5ProtocolVersion:
|
||||||
|
raise newException(Socks5VersionError, "Unsupported socks version")
|
||||||
|
if serverReply != Socks5ReplyType.Succeeded.byte:
|
||||||
|
var socks5ReplyType: Socks5ReplyType
|
||||||
|
if socks5ReplyType.checkedEnumAssign(serverReply):
|
||||||
|
raise newException(Socks5ServerReplyError, fmt"Server reply error: {socks5ReplyType}")
|
||||||
|
else:
|
||||||
|
raise newException(LPError, fmt"Unexpected server reply: {serverReply}")
|
||||||
|
let atyp = firstFourOctets[3]
|
||||||
|
case atyp:
|
||||||
|
of Socks5AddressType.IPv4.byte:
|
||||||
|
discard await transp.read(ipV4NumOctets + portNumOctets)
|
||||||
|
of Socks5AddressType.FQDN.byte:
|
||||||
|
let fqdnNumOctets = await transp.read(1)
|
||||||
|
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
|
||||||
|
of Socks5AddressType.IPv6.byte:
|
||||||
|
discard await transp.read(ipV6NumOctets + portNumOctets)
|
||||||
|
else:
|
||||||
|
raise newException(LPError, "Address not supported")
|
||||||
|
|
||||||
|
proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
|
||||||
|
var addressArray = ($address).split('/')
|
||||||
|
if addressArray.len < 2: raise newException(LPError, fmt"Onion address not supported {address}")
|
||||||
|
addressArray = addressArray[2].split(':')
|
||||||
|
if addressArray.len == 0: raise newException(LPError, fmt"Onion address not supported {address}")
|
||||||
|
let
|
||||||
|
addressStr = addressArray[0] & ".onion"
|
||||||
|
dstAddr = @(uint8(addressStr.len).toBytes()) & addressStr.toBytes()
|
||||||
|
dstPort = address.data.buffer[37..38]
|
||||||
|
return (Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
||||||
|
|
||||||
|
proc parseIpTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
|
||||||
|
let (codec, atyp) =
|
||||||
|
if IPv4Tcp.match(address):
|
||||||
|
(multiCodec("ip4"), Socks5AddressType.IPv4.byte)
|
||||||
|
elif IPv6Tcp.match(address):
|
||||||
|
(multiCodec("ip6"), Socks5AddressType.IPv6.byte)
|
||||||
|
else:
|
||||||
|
raise newException(LPError, fmt"IP address not supported {address}")
|
||||||
|
let
|
||||||
|
dstAddr = address[codec].get().protoArgument().get()
|
||||||
|
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
|
||||||
|
(atyp, dstAddr, dstPort)
|
||||||
|
|
||||||
|
proc parseDnsTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) =
|
||||||
|
let
|
||||||
|
dnsAddress = address[multiCodec("dns")].get().protoArgument().get()
|
||||||
|
dstAddr = @(uint8(dnsAddress.len).toBytes()) & dnsAddress
|
||||||
|
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
|
||||||
|
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
|
||||||
|
|
||||||
|
proc dialPeer(
|
||||||
|
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
|
||||||
|
let (atyp, dstAddr, dstPort) =
|
||||||
|
if Onion3.match(address):
|
||||||
|
parseOnion3(address)
|
||||||
|
elif IPTcp.match(address):
|
||||||
|
parseIpTcp(address)
|
||||||
|
elif DnsTcp.match(address):
|
||||||
|
parseDnsTcp(address)
|
||||||
|
else:
|
||||||
|
raise newException(LPError, fmt"Address not supported: {address}")
|
||||||
|
|
||||||
|
let reserved = byte(0)
|
||||||
|
let request = @[
|
||||||
|
Socks5ProtocolVersion,
|
||||||
|
Socks5RequestCommand.Connect.byte,
|
||||||
|
reserved,
|
||||||
|
atyp] & dstAddr & dstPort
|
||||||
|
discard await transp.write(request)
|
||||||
|
await readServerReply(transp)
|
||||||
|
|
||||||
|
method dial*(
|
||||||
|
self: TorTransport,
|
||||||
|
hostname: string,
|
||||||
|
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
|
||||||
|
## dial a peer
|
||||||
|
##
|
||||||
|
if not handlesDial(address):
|
||||||
|
raise newException(LPError, fmt"Address not supported: {address}")
|
||||||
|
trace "Dialing remote peer", address = $address
|
||||||
|
let transp = await connectToTorServer(self.transportAddress)
|
||||||
|
|
||||||
|
try:
|
||||||
|
await dialPeer(transp, address)
|
||||||
|
return await self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
|
||||||
|
except CatchableError as err:
|
||||||
|
await transp.closeWait()
|
||||||
|
raise err
|
||||||
|
|
||||||
|
method start*(
|
||||||
|
self: TorTransport,
|
||||||
|
addrs: seq[MultiAddress]) {.async.} =
|
||||||
|
## listen on the transport
|
||||||
|
##
|
||||||
|
|
||||||
|
var listenAddrs: seq[MultiAddress]
|
||||||
|
var onion3Addrs: seq[MultiAddress]
|
||||||
|
for i, ma in addrs:
|
||||||
|
if not handlesStart(ma):
|
||||||
|
warn "Invalid address detected, skipping!", address = ma
|
||||||
|
continue
|
||||||
|
|
||||||
|
let listenAddress = ma[0..1].get()
|
||||||
|
listenAddrs.add(listenAddress)
|
||||||
|
let onion3 = ma[multiCodec("onion3")].get()
|
||||||
|
onion3Addrs.add(onion3)
|
||||||
|
|
||||||
|
if len(listenAddrs) != 0 and len(onion3Addrs) != 0:
|
||||||
|
await procCall Transport(self).start(onion3Addrs)
|
||||||
|
await self.tcpTransport.start(listenAddrs)
|
||||||
|
else:
|
||||||
|
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
|
||||||
|
|
||||||
|
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
|
||||||
|
## accept a new Tor connection
|
||||||
|
##
|
||||||
|
let conn = await self.tcpTransport.accept()
|
||||||
|
conn.observedAddr = Opt.none(MultiAddress)
|
||||||
|
return conn
|
||||||
|
|
||||||
|
method stop*(self: TorTransport) {.async, gcsafe.} =
|
||||||
|
## stop the transport
|
||||||
|
##
|
||||||
|
await procCall Transport(self).stop() # call base
|
||||||
|
await self.tcpTransport.stop()
|
||||||
|
|
||||||
|
method handles*(t: TorTransport, address: MultiAddress): bool {.gcsafe.} =
|
||||||
|
if procCall Transport(t).handles(address):
|
||||||
|
return handlesDial(address) or handlesStart(address)
|
||||||
|
|
||||||
|
type
|
||||||
|
TorSwitch* = ref object of Switch
|
||||||
|
|
||||||
|
proc new*(
|
||||||
|
T: typedesc[TorSwitch],
|
||||||
|
torServer: TransportAddress,
|
||||||
|
rng: ref HmacDrbgContext,
|
||||||
|
addresses: seq[MultiAddress] = @[],
|
||||||
|
flags: set[ServerFlags] = {}): TorSwitch
|
||||||
|
{.raises: [LPError, Defect], public.} =
|
||||||
|
var builder = SwitchBuilder.new()
|
||||||
|
.withRng(rng)
|
||||||
|
.withTransport(proc(upgr: Upgrade): Transport = TorTransport.new(torServer, flags, upgr))
|
||||||
|
if addresses.len != 0:
|
||||||
|
builder = builder.withAddresses(addresses)
|
||||||
|
let switch = builder.withMplex()
|
||||||
|
.withNoise()
|
||||||
|
.build()
|
||||||
|
let torSwitch = T(
|
||||||
|
peerInfo: switch.peerInfo,
|
||||||
|
ms: switch.ms,
|
||||||
|
transports: switch.transports,
|
||||||
|
connManager: switch.connManager,
|
||||||
|
peerStore: switch.peerStore,
|
||||||
|
dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.transports, switch.ms, nil),
|
||||||
|
nameResolver: nil)
|
||||||
|
|
||||||
|
torSwitch.connManager.peerStore = switch.peerStore
|
||||||
|
return torSwitch
|
||||||
|
|
||||||
|
method addTransport*(s: TorSwitch, t: Transport) =
|
||||||
|
doAssert(false, "not implemented!")
|
||||||
|
|
||||||
|
method getTorTransport*(s: TorSwitch): Transport {.base.} =
|
||||||
|
return s.transports[0]
|
@ -87,12 +87,13 @@ method upgradeIncoming*(
|
|||||||
|
|
||||||
method upgradeOutgoing*(
|
method upgradeOutgoing*(
|
||||||
self: Transport,
|
self: Transport,
|
||||||
conn: Connection): Future[Connection] {.base, gcsafe.} =
|
conn: Connection,
|
||||||
|
peerId: Opt[PeerId]): Future[Connection] {.base, gcsafe.} =
|
||||||
## base upgrade method that the transport uses to perform
|
## base upgrade method that the transport uses to perform
|
||||||
## transport specific upgrades
|
## transport specific upgrades
|
||||||
##
|
##
|
||||||
|
|
||||||
self.upgrader.upgradeOutgoing(conn)
|
self.upgrader.upgradeOutgoing(conn, peerId)
|
||||||
|
|
||||||
method handles*(
|
method handles*(
|
||||||
self: Transport,
|
self: Transport,
|
||||||
|
@ -15,6 +15,7 @@ else:
|
|||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import std/[sequtils]
|
import std/[sequtils]
|
||||||
|
import stew/results
|
||||||
import chronos, chronicles
|
import chronos, chronicles
|
||||||
import transport,
|
import transport,
|
||||||
../errors,
|
../errors,
|
||||||
@ -31,22 +32,26 @@ import transport,
|
|||||||
logScope:
|
logScope:
|
||||||
topics = "libp2p wstransport"
|
topics = "libp2p wstransport"
|
||||||
|
|
||||||
export transport, websock
|
export transport, websock, results
|
||||||
|
|
||||||
const
|
const
|
||||||
WsTransportTrackerName* = "libp2p.wstransport"
|
|
||||||
|
|
||||||
DefaultHeadersTimeout = 3.seconds
|
DefaultHeadersTimeout = 3.seconds
|
||||||
|
|
||||||
type
|
type
|
||||||
WsStream = ref object of Connection
|
WsStream = ref object of Connection
|
||||||
session: WSSession
|
session: WSSession
|
||||||
|
|
||||||
|
method initStream*(s: WsStream) =
|
||||||
|
if s.objName.len == 0:
|
||||||
|
s.objName = "WsStream"
|
||||||
|
|
||||||
|
procCall Connection(s).initStream()
|
||||||
|
|
||||||
proc new*(T: type WsStream,
|
proc new*(T: type WsStream,
|
||||||
session: WSSession,
|
session: WSSession,
|
||||||
dir: Direction,
|
dir: Direction,
|
||||||
timeout = 10.minutes,
|
observedAddr: Opt[MultiAddress],
|
||||||
observedAddr: MultiAddress = MultiAddress()): T =
|
timeout = 10.minutes): T =
|
||||||
|
|
||||||
let stream = T(
|
let stream = T(
|
||||||
session: session,
|
session: session,
|
||||||
@ -221,8 +226,7 @@ proc connHandler(self: WsTransport,
|
|||||||
await stream.close()
|
await stream.close()
|
||||||
raise exc
|
raise exc
|
||||||
|
|
||||||
let conn = WsStream.new(stream, dir)
|
let conn = WsStream.new(stream, dir, Opt.some(observedAddr))
|
||||||
conn.observedAddr = observedAddr
|
|
||||||
|
|
||||||
self.connections[dir].add(conn)
|
self.connections[dir].add(conn)
|
||||||
proc onClose() {.async.} =
|
proc onClose() {.async.} =
|
||||||
@ -256,7 +260,7 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
let
|
let
|
||||||
wstransp = await self.wsserver.handleRequest(req)
|
wstransp = await self.wsserver.handleRequest(req).wait(self.handshakeTimeout)
|
||||||
isSecure = self.httpservers[index].secure
|
isSecure = self.httpservers[index].secure
|
||||||
|
|
||||||
return await self.connHandler(wstransp, isSecure, Direction.In)
|
return await self.connHandler(wstransp, isSecure, Direction.In)
|
||||||
@ -273,6 +277,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
|
|||||||
debug "AsyncStream Error", exc = exc.msg
|
debug "AsyncStream Error", exc = exc.msg
|
||||||
except TransportTooManyError as exc:
|
except TransportTooManyError as exc:
|
||||||
debug "Too many files opened", exc = exc.msg
|
debug "Too many files opened", exc = exc.msg
|
||||||
|
except AsyncTimeoutError as exc:
|
||||||
|
debug "Timed out", exc = exc.msg
|
||||||
except TransportUseClosedError as exc:
|
except TransportUseClosedError as exc:
|
||||||
debug "Server was closed", exc = exc.msg
|
debug "Server was closed", exc = exc.msg
|
||||||
raise newTransportClosedError(exc)
|
raise newTransportClosedError(exc)
|
||||||
|
@ -88,10 +88,11 @@ proc mux*(
|
|||||||
|
|
||||||
method upgradeOutgoing*(
|
method upgradeOutgoing*(
|
||||||
self: MuxedUpgrade,
|
self: MuxedUpgrade,
|
||||||
conn: Connection): Future[Connection] {.async, gcsafe.} =
|
conn: Connection,
|
||||||
|
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
|
||||||
trace "Upgrading outgoing connection", conn
|
trace "Upgrading outgoing connection", conn
|
||||||
|
|
||||||
let sconn = await self.secure(conn) # secure the connection
|
let sconn = await self.secure(conn, peerId) # secure the connection
|
||||||
if isNil(sconn):
|
if isNil(sconn):
|
||||||
raise newException(UpgradeFailedError,
|
raise newException(UpgradeFailedError,
|
||||||
"unable to secure connection, stopping upgrade")
|
"unable to secure connection, stopping upgrade")
|
||||||
@ -129,7 +130,7 @@ method upgradeIncoming*(
|
|||||||
|
|
||||||
var cconn = conn
|
var cconn = conn
|
||||||
try:
|
try:
|
||||||
var sconn = await secure.secure(cconn, false)
|
var sconn = await secure.secure(cconn, false, Opt.none(PeerId))
|
||||||
if isNil(sconn):
|
if isNil(sconn):
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -7,6 +7,7 @@
|
|||||||
# This file may not be copied, modified, or distributed except according to
|
# This file may not be copied, modified, or distributed except according to
|
||||||
# those terms.
|
# those terms.
|
||||||
|
|
||||||
|
{.push gcsafe.}
|
||||||
when (NimMajor, NimMinor) < (1, 4):
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
{.push raises: [Defect].}
|
{.push raises: [Defect].}
|
||||||
else:
|
else:
|
||||||
@ -47,12 +48,14 @@ method upgradeIncoming*(
|
|||||||
|
|
||||||
method upgradeOutgoing*(
|
method upgradeOutgoing*(
|
||||||
self: Upgrade,
|
self: Upgrade,
|
||||||
conn: Connection): Future[Connection] {.base.} =
|
conn: Connection,
|
||||||
|
peerId: Opt[PeerId]): Future[Connection] {.base.} =
|
||||||
doAssert(false, "Not implemented!")
|
doAssert(false, "Not implemented!")
|
||||||
|
|
||||||
proc secure*(
|
proc secure*(
|
||||||
self: Upgrade,
|
self: Upgrade,
|
||||||
conn: Connection): Future[Connection] {.async, gcsafe.} =
|
conn: Connection,
|
||||||
|
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
|
||||||
if self.secureManagers.len <= 0:
|
if self.secureManagers.len <= 0:
|
||||||
raise newException(UpgradeFailedError, "No secure managers registered!")
|
raise newException(UpgradeFailedError, "No secure managers registered!")
|
||||||
|
|
||||||
@ -67,7 +70,7 @@ proc secure*(
|
|||||||
# let's avoid duplicating checks but detect if it fails to do it properly
|
# let's avoid duplicating checks but detect if it fails to do it properly
|
||||||
doAssert(secureProtocol.len > 0)
|
doAssert(secureProtocol.len > 0)
|
||||||
|
|
||||||
return await secureProtocol[0].secure(conn, true)
|
return await secureProtocol[0].secure(conn, true, peerId)
|
||||||
|
|
||||||
proc identify*(
|
proc identify*(
|
||||||
self: Upgrade,
|
self: Upgrade,
|
||||||
|
@ -25,6 +25,14 @@ template heartbeat*(name: string, interval: Duration, body: untyped): untyped =
|
|||||||
nextHeartbeat += interval
|
nextHeartbeat += interval
|
||||||
let now = Moment.now()
|
let now = Moment.now()
|
||||||
if nextHeartbeat < now:
|
if nextHeartbeat < now:
|
||||||
info "Missed heartbeat", heartbeat = name, delay = now - nextHeartbeat
|
let
|
||||||
nextHeartbeat = now + interval
|
delay = now - nextHeartbeat
|
||||||
|
itv = interval
|
||||||
|
if delay > itv:
|
||||||
|
info "Missed multiple heartbeats", heartbeat = name,
|
||||||
|
delay = delay, hinterval = itv
|
||||||
|
else:
|
||||||
|
debug "Missed heartbeat", heartbeat = name,
|
||||||
|
delay = delay, hinterval = itv
|
||||||
|
nextHeartbeat = now + itv
|
||||||
await sleepAsync(nextHeartbeat - now)
|
await sleepAsync(nextHeartbeat - now)
|
||||||
|
73
libp2p/utils/offsettedseq.nim
Normal file
73
libp2p/utils/offsettedseq.nim
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
import sequtils
|
||||||
|
|
||||||
|
type
|
||||||
|
OffsettedSeq*[T] = object
|
||||||
|
s*: seq[T]
|
||||||
|
offset*: int
|
||||||
|
|
||||||
|
proc initOffsettedSeq*[T](offset: int = 0): OffsettedSeq[T] =
|
||||||
|
OffsettedSeq[T](s: newSeq[T](), offset: offset)
|
||||||
|
|
||||||
|
proc all*[T](o: OffsettedSeq[T], pred: proc (x: T): bool): bool =
|
||||||
|
o.s.all(pred)
|
||||||
|
|
||||||
|
proc any*[T](o: OffsettedSeq[T], pred: proc (x: T): bool): bool =
|
||||||
|
o.s.any(pred)
|
||||||
|
|
||||||
|
proc apply*[T](o: OffsettedSeq[T], op: proc (x: T)) =
|
||||||
|
o.s.apply(pred)
|
||||||
|
|
||||||
|
proc apply*[T](o: OffsettedSeq[T], op: proc (x: T): T) =
|
||||||
|
o.s.apply(pred)
|
||||||
|
|
||||||
|
proc apply*[T](o: OffsettedSeq[T], op: proc (x: var T)) =
|
||||||
|
o.s.apply(pred)
|
||||||
|
|
||||||
|
func count*[T](o: OffsettedSeq[T], x: T): int =
|
||||||
|
o.s.count(x)
|
||||||
|
|
||||||
|
proc flushIf*[T](o: OffsettedSeq[T], pred: proc (x: T): bool) =
|
||||||
|
var i = 0
|
||||||
|
for e in o.s:
|
||||||
|
if not pred(e): break
|
||||||
|
i.inc()
|
||||||
|
if i > 0:
|
||||||
|
o.s.delete(0..<i)
|
||||||
|
o.offset.inc(i)
|
||||||
|
|
||||||
|
template flushIfIt*(o, pred: untyped) =
|
||||||
|
var i = 0
|
||||||
|
for it {.inject.} in o.s:
|
||||||
|
if not pred: break
|
||||||
|
i.inc()
|
||||||
|
if i > 0:
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
o.s.delete(0, i - 1)
|
||||||
|
else:
|
||||||
|
o.s.delete(0..<i)
|
||||||
|
o.offset.inc(i)
|
||||||
|
|
||||||
|
proc add*[T](o: var OffsettedSeq[T], v: T) =
|
||||||
|
o.s.add(v)
|
||||||
|
|
||||||
|
proc `[]`*[T](o: var OffsettedSeq[T], index: int): var T =
|
||||||
|
o.s[index - o.offset]
|
||||||
|
|
||||||
|
iterator items*[T](o: OffsettedSeq[T]): T =
|
||||||
|
for e in o.s:
|
||||||
|
yield e
|
||||||
|
|
||||||
|
proc high*[T](o: OffsettedSeq[T]): int =
|
||||||
|
o.s.high + o.offset
|
||||||
|
|
||||||
|
proc low*[T](o: OffsettedSeq[T]): int =
|
||||||
|
o.s.low + o.offset
|
@ -58,10 +58,7 @@ type
|
|||||||
SomeVarint* = PBSomeVarint | LPSomeVarint
|
SomeVarint* = PBSomeVarint | LPSomeVarint
|
||||||
SomeUVarint* = PBSomeUVarint | LPSomeUVarint
|
SomeUVarint* = PBSomeUVarint | LPSomeUVarint
|
||||||
|
|
||||||
template toUleb(x: uint64): uint64 = x
|
template toUleb[T: uint64|uint32|uint16|uint8|uint](x: T): T = x
|
||||||
template toUleb(x: uint32): uint32 = x
|
|
||||||
template toUleb(x: uint16): uint16 = x
|
|
||||||
template toUleb(x: uint8): uint8 = x
|
|
||||||
|
|
||||||
func toUleb(x: zint64): uint64 =
|
func toUleb(x: zint64): uint64 =
|
||||||
let v = cast[uint64](x)
|
let v = cast[uint64](x)
|
||||||
|
@ -39,21 +39,9 @@ proc len*(vb: VBuffer): int =
|
|||||||
result = len(vb.buffer) - vb.offset
|
result = len(vb.buffer) - vb.offset
|
||||||
doAssert(result >= 0)
|
doAssert(result >= 0)
|
||||||
|
|
||||||
proc isLiteral[T](s: seq[T]): bool {.inline.} =
|
|
||||||
when defined(gcOrc) or defined(gcArc):
|
|
||||||
false
|
|
||||||
else:
|
|
||||||
type
|
|
||||||
SeqHeader = object
|
|
||||||
length, reserved: int
|
|
||||||
(cast[ptr SeqHeader](s).reserved and (1 shl (sizeof(int) * 8 - 2))) != 0
|
|
||||||
|
|
||||||
proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
|
proc initVBuffer*(data: seq[byte], offset = 0): VBuffer =
|
||||||
## Initialize VBuffer with shallow copy of ``data``.
|
## Initialize VBuffer with shallow copy of ``data``.
|
||||||
if isLiteral(data):
|
result.buffer = data
|
||||||
result.buffer = data
|
|
||||||
else:
|
|
||||||
shallowCopy(result.buffer, data)
|
|
||||||
result.offset = offset
|
result.offset = offset
|
||||||
|
|
||||||
proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =
|
proc initVBuffer*(data: openArray[byte], offset = 0): VBuffer =
|
||||||
|
19
mkdocs.yml
19
mkdocs.yml
@ -3,7 +3,9 @@ site_name: nim-libp2p
|
|||||||
repo_url: https://github.com/status-im/nim-libp2p
|
repo_url: https://github.com/status-im/nim-libp2p
|
||||||
repo_name: status-im/nim-libp2p
|
repo_name: status-im/nim-libp2p
|
||||||
site_url: https://status-im.github.io/nim-libp2p/docs
|
site_url: https://status-im.github.io/nim-libp2p/docs
|
||||||
edit_uri: edit/unstable/examples/
|
# Can't find a way to point the edit to the .nim instead
|
||||||
|
# of the .md
|
||||||
|
edit_uri: ''
|
||||||
|
|
||||||
docs_dir: examples
|
docs_dir: examples
|
||||||
|
|
||||||
@ -18,8 +20,8 @@ markdown_extensions:
|
|||||||
- pymdownx.superfences
|
- pymdownx.superfences
|
||||||
|
|
||||||
theme:
|
theme:
|
||||||
logo: https://docs.libp2p.io/images/logo_small.png
|
logo: https://libp2p.io/img/logo_small.png
|
||||||
favicon: https://docs.libp2p.io/images/logo_small.png
|
favicon: https://libp2p.io/img/logo_small.png
|
||||||
name: material
|
name: material
|
||||||
features:
|
features:
|
||||||
- navigation.instant
|
- navigation.instant
|
||||||
@ -38,8 +40,13 @@ theme:
|
|||||||
name: Switch to light mode
|
name: Switch to light mode
|
||||||
|
|
||||||
nav:
|
nav:
|
||||||
- Introduction: README.md
|
|
||||||
- Tutorials:
|
- Tutorials:
|
||||||
- 'Part I: Simple connection': tutorial_1_connect.md
|
- 'Introduction': index.md
|
||||||
- 'Part II: Custom protocol': tutorial_2_customproto.md
|
- 'Simple connection': tutorial_1_connect.md
|
||||||
|
- 'Create a custom protocol': tutorial_2_customproto.md
|
||||||
|
- 'Protobuf': tutorial_3_protobuf.md
|
||||||
|
- 'GossipSub': tutorial_4_gossipsub.md
|
||||||
|
- 'Discovery Manager': tutorial_5_discovery.md
|
||||||
|
- 'Game': tutorial_6_game.md
|
||||||
|
- 'Circuit Relay': circuitrelay.md
|
||||||
- Reference: '/nim-libp2p/master/libp2p.html'
|
- Reference: '/nim-libp2p/master/libp2p.html'
|
||||||
|
167
nimble.lock
167
nimble.lock
@ -2,53 +2,68 @@
|
|||||||
"version": 1,
|
"version": 1,
|
||||||
"packages": {
|
"packages": {
|
||||||
"unittest2": {
|
"unittest2": {
|
||||||
"version": "0.0.4",
|
"version": "0.0.5",
|
||||||
"vcsRevision": "f180f596c88dfd266f746ed6f8dbebce39c824db",
|
"vcsRevision": "da8398c45cafd5bd7772da1fc96e3924a18d3823",
|
||||||
"url": "https://github.com/status-im/nim-unittest2.git",
|
"url": "https://github.com/status-im/nim-unittest2",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "fa309c41eaf6ef57895b9e603f2620a2f6e11780"
|
"sha1": "b3f8493a4948989ef3e645a38b23aad77e851e26"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"testutils": {
|
||||||
|
"version": "0.5.0",
|
||||||
|
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
|
||||||
|
"url": "https://github.com/status-im/nim-testutils",
|
||||||
|
"downloadMethod": "git",
|
||||||
|
"dependencies": [
|
||||||
|
"unittest2"
|
||||||
|
],
|
||||||
|
"checksums": {
|
||||||
|
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"stew": {
|
"stew": {
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"vcsRevision": "6ad35b876fb6ebe0dfee0f697af173acc47906ee",
|
"vcsRevision": "7184d2424dc3945657884646a72715d494917aad",
|
||||||
"url": "https://github.com/status-im/nim-stew.git",
|
"url": "https://github.com/status-im/nim-stew",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [],
|
"dependencies": [
|
||||||
|
"unittest2"
|
||||||
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "46d58c4feb457f3241e3347778334e325dce5268"
|
"sha1": "f3125ed2fd126dfd3edbaea14275abd9fa57d703"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"bearssl": {
|
"bearssl": {
|
||||||
"version": "0.1.5",
|
"version": "0.2.0",
|
||||||
"vcsRevision": "ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7",
|
"vcsRevision": "a647994910904b0103a05db3a5ec1ecfc4d91a88",
|
||||||
"url": "https://github.com/status-im/nim-bearssl",
|
"url": "https://github.com/status-im/nim-bearssl",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"unittest2"
|
"unittest2"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "383abd5becc77bf8e365b780a29d20529e1d9c4c"
|
"sha1": "d634751df2716ea9975912a2d5d0a090bb6bcfa9"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"httputils": {
|
"httputils": {
|
||||||
"version": "0.3.0",
|
"version": "0.3.0",
|
||||||
"vcsRevision": "689da19e9e9cfff4ced85e2b25c6b2b5598ed079",
|
"vcsRevision": "a85bd52ae0a956983ca6b3267c72961d2ec0245f",
|
||||||
"url": "https://github.com/status-im/nim-http-utils.git",
|
"url": "https://github.com/status-im/nim-http-utils",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"stew"
|
"stew",
|
||||||
|
"unittest2"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "4ad3ad68d13c50184180ab4b2eacc0bd7ed2ed44"
|
"sha1": "92933b21bcd29335f68e377e2b2193fa331e28b3"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"chronos": {
|
"chronos": {
|
||||||
"version": "3.0.11",
|
"version": "3.0.11",
|
||||||
"vcsRevision": "17fed89c99beac5a92d3668d0d3e9b0e4ac13936",
|
"vcsRevision": "75d030ff71264513fb9701c75a326cd36fcb4692",
|
||||||
"url": "https://github.com/status-im/nim-chronos.git",
|
"url": "https://github.com/status-im/nim-chronos",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"stew",
|
"stew",
|
||||||
@ -57,52 +72,27 @@
|
|||||||
"unittest2"
|
"unittest2"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "f6fffc87571e5f76af2a77c4ebcc0e00909ced4e"
|
"sha1": "57a674ba3c1a57a694fa7810d93ceb68f338a861"
|
||||||
}
|
|
||||||
},
|
|
||||||
"metrics": {
|
|
||||||
"version": "0.0.1",
|
|
||||||
"vcsRevision": "71e0f0e354e1f4c59e3dc92153989c8b723c3440",
|
|
||||||
"url": "https://github.com/status-im/nim-metrics",
|
|
||||||
"downloadMethod": "git",
|
|
||||||
"dependencies": [
|
|
||||||
"chronos"
|
|
||||||
],
|
|
||||||
"checksums": {
|
|
||||||
"sha1": "86da251fe532ef2163da30343688ab1c148c0340"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"testutils": {
|
|
||||||
"version": "0.4.2",
|
|
||||||
"vcsRevision": "aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2",
|
|
||||||
"url": "https://github.com/status-im/nim-testutils",
|
|
||||||
"downloadMethod": "git",
|
|
||||||
"dependencies": [
|
|
||||||
"unittest2"
|
|
||||||
],
|
|
||||||
"checksums": {
|
|
||||||
"sha1": "94427e0cce0e0c5841edcd3a6530b4e6b857a3cb"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"faststreams": {
|
"faststreams": {
|
||||||
"version": "0.3.0",
|
"version": "0.3.0",
|
||||||
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
|
"vcsRevision": "b42daf41d8eb4fbce40add6836bed838f8d85b6f",
|
||||||
"url": "https://github.com/status-im/nim-faststreams.git",
|
"url": "https://github.com/status-im/nim-faststreams",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"stew",
|
"stew",
|
||||||
"testutils",
|
|
||||||
"chronos",
|
"chronos",
|
||||||
"unittest2"
|
"unittest2"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
|
"sha1": "62f7ac8fb200a8ecb9e6c63f5553a7dad66ae613"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"serialization": {
|
"serialization": {
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"vcsRevision": "fcd0eadadde0ee000a63df8ab21dc4e9f015a790",
|
"vcsRevision": "d77417cba6896c26287a68e6a95762e45a1b87e5",
|
||||||
"url": "https://github.com/status-im/nim-serialization.git",
|
"url": "https://github.com/status-im/nim-serialization",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"faststreams",
|
"faststreams",
|
||||||
@ -110,70 +100,72 @@
|
|||||||
"stew"
|
"stew"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "fef59519892cac70cccd81b612085caaa5e3e6cf"
|
"sha1": "e17244c6654de22254acb9bcf71d8ddbeca8b2aa"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"metrics": {
|
||||||
|
"version": "0.0.1",
|
||||||
|
"vcsRevision": "21e99a2e9d9f80e68bef65c80ef781613005fccb",
|
||||||
|
"url": "https://github.com/status-im/nim-metrics",
|
||||||
|
"downloadMethod": "git",
|
||||||
|
"dependencies": [
|
||||||
|
"chronos"
|
||||||
|
],
|
||||||
|
"checksums": {
|
||||||
|
"sha1": "ab1c994bbcd6b04f2500f05d8ea4e463f33dd310"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nimcrypto": {
|
||||||
|
"version": "0.5.4",
|
||||||
|
"vcsRevision": "24e006df85927f64916e60511620583b11403178",
|
||||||
|
"url": "https://github.com/cheatfate/nimcrypto",
|
||||||
|
"downloadMethod": "git",
|
||||||
|
"dependencies": [],
|
||||||
|
"checksums": {
|
||||||
|
"sha1": "a4db2105de265930f1578bb7957f49fa39b10d9b"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"json_serialization": {
|
"json_serialization": {
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"vcsRevision": "c5f0e2465e8375dfc7aa0f56ccef67cb680bc6b0",
|
"vcsRevision": "a7d815ed92f200f490c95d3cfd722089cc923ce6",
|
||||||
"url": "https://github.com/status-im/nim-json-serialization.git",
|
"url": "https://github.com/status-im/nim-json-serialization",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"serialization",
|
"serialization",
|
||||||
"stew"
|
"stew"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "d89d79d0679a3a41b350e3ad4be56c0308cc5ec6"
|
"sha1": "50fc34a992ef3df68a7bee88af096bb8ed42572f"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"chronicles": {
|
"chronicles": {
|
||||||
"version": "0.10.2",
|
"version": "0.10.3",
|
||||||
"vcsRevision": "1682096306ddba8185dcfac360a8c3f952d721e4",
|
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
|
||||||
"url": "https://github.com/status-im/nim-chronicles.git",
|
"url": "https://github.com/status-im/nim-chronicles",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"testutils",
|
"testutils",
|
||||||
"json_serialization"
|
"json_serialization"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "9a5bebb76b0f7d587a31e621d260119279e91c76"
|
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
|
||||||
}
|
|
||||||
},
|
|
||||||
"asynctest": {
|
|
||||||
"version": "0.3.1",
|
|
||||||
"vcsRevision": "5347c59b4b057443a014722aa40800cd8bb95c69",
|
|
||||||
"url": "https://github.com/status-im/asynctest.git",
|
|
||||||
"downloadMethod": "git",
|
|
||||||
"dependencies": [],
|
|
||||||
"checksums": {
|
|
||||||
"sha1": "53e0b610d13700296755a4ebe789882cae47a3b9"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nimcrypto": {
|
|
||||||
"version": "0.5.4",
|
|
||||||
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
|
|
||||||
"url": "https://github.com/cheatfate/nimcrypto",
|
|
||||||
"downloadMethod": "git",
|
|
||||||
"dependencies": [],
|
|
||||||
"checksums": {
|
|
||||||
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"zlib": {
|
"zlib": {
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
|
"vcsRevision": "6a6670afba6b97b29b920340e2641978c05ab4d8",
|
||||||
"url": "https://github.com/status-im/nim-zlib",
|
"url": "https://github.com/status-im/nim-zlib",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"stew"
|
"stew"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
|
"sha1": "2621e46369be2a6846713e8c3d681a5bba3e0325"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"websock": {
|
"websock": {
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"vcsRevision": "73edde4417f7b45003113b7a34212c3ccd95b9fd",
|
"vcsRevision": "691f069b209d372b1240d5ae1f57fb7bbafeaba7",
|
||||||
"url": "https://github.com/status-im/nim-websock",
|
"url": "https://github.com/status-im/nim-websock",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
@ -181,36 +173,35 @@
|
|||||||
"httputils",
|
"httputils",
|
||||||
"chronicles",
|
"chronicles",
|
||||||
"stew",
|
"stew",
|
||||||
"asynctest",
|
|
||||||
"nimcrypto",
|
"nimcrypto",
|
||||||
"bearssl",
|
"bearssl",
|
||||||
"zlib"
|
"zlib"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "ec2b137543f280298ca48de9ed4461a033ba88d3"
|
"sha1": "c71edfce064e7c0cadde0e687c6edc0caaf9ec07"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"dnsclient": {
|
"dnsclient": {
|
||||||
"version": "0.1.2",
|
"version": "0.3.2",
|
||||||
"vcsRevision": "fbb76f8af8a33ab818184a7d4406d9fee20993be",
|
"vcsRevision": "fcd7443634b950eaea574e5eaa00a628ae029823",
|
||||||
"url": "https://github.com/ba0f3/dnsclient.nim.git",
|
"url": "https://github.com/ba0f3/dnsclient.nim",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [],
|
"dependencies": [],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "663239a914c814204b30dda6e0902cc0fbd0b8ee"
|
"sha1": "146aa4a8d512a3a786c5bf54311b79900166d9d7"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"secp256k1": {
|
"secp256k1": {
|
||||||
"version": "0.5.2",
|
"version": "0.5.2",
|
||||||
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
|
"vcsRevision": "fd173fdff863ce2e211cf64c9a03bc7539fe40b0",
|
||||||
"url": "https://github.com/status-im/nim-secp256k1.git",
|
"url": "https://github.com/status-im/nim-secp256k1",
|
||||||
"downloadMethod": "git",
|
"downloadMethod": "git",
|
||||||
"dependencies": [
|
"dependencies": [
|
||||||
"stew",
|
"stew",
|
||||||
"nimcrypto"
|
"nimcrypto"
|
||||||
],
|
],
|
||||||
"checksums": {
|
"checksums": {
|
||||||
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
|
"sha1": "657c79f6f2b1b6da92a9cda81ffc9f95d26443cb"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3,7 +3,7 @@ import chronos, chronicles, stew/byteutils
|
|||||||
import helpers
|
import helpers
|
||||||
import ../libp2p
|
import ../libp2p
|
||||||
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
|
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
|
||||||
import ../libp2p/protocols/relay/[relay, client, utils]
|
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
|
||||||
|
|
||||||
type
|
type
|
||||||
SwitchCreator = proc(
|
SwitchCreator = proc(
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import sequtils
|
import sequtils
|
||||||
import chronos, stew/byteutils
|
import chronos, stew/[byteutils, results]
|
||||||
import ../libp2p/[stream/connection,
|
import ../libp2p/[stream/connection,
|
||||||
transports/transport,
|
transports/transport,
|
||||||
upgrademngrs/upgrade,
|
upgrademngrs/upgrade,
|
||||||
@ -13,36 +13,37 @@ import ./helpers
|
|||||||
|
|
||||||
type TransportProvider* = proc(): Transport {.gcsafe, raises: [Defect].}
|
type TransportProvider* = proc(): Transport {.gcsafe, raises: [Defect].}
|
||||||
|
|
||||||
proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string = "") =
|
||||||
suite name & " common tests":
|
block:
|
||||||
teardown:
|
let transpProvider = prov
|
||||||
checkTrackers()
|
|
||||||
|
|
||||||
asyncTest "can handle local address":
|
asyncTest "can handle local address":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
check transport1.handles(transport1.addrs[0])
|
check transport1.handles(transport1.addrs[0])
|
||||||
await transport1.stop()
|
await transport1.stop()
|
||||||
|
|
||||||
asyncTest "e2e: handle observedAddr":
|
asyncTest "e2e: handle observedAddr":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
let transport2 = prov()
|
let transport2 = transpProvider()
|
||||||
|
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async, gcsafe.} =
|
||||||
let conn = await transport1.accept()
|
let conn = await transport1.accept()
|
||||||
check transport1.handles(conn.observedAddr)
|
if conn.observedAddr.isSome():
|
||||||
|
check transport1.handles(conn.observedAddr.get())
|
||||||
await conn.close()
|
await conn.close()
|
||||||
|
|
||||||
let handlerWait = acceptHandler()
|
let handlerWait = acceptHandler()
|
||||||
|
|
||||||
let conn = await transport2.dial(transport1.addrs[0])
|
let conn = await transport2.dial(transport1.addrs[0])
|
||||||
|
|
||||||
check transport2.handles(conn.observedAddr)
|
if conn.observedAddr.isSome():
|
||||||
|
check transport2.handles(conn.observedAddr.get())
|
||||||
|
|
||||||
await conn.close() #for some protocols, closing requires actively reading, so we must close here
|
await conn.close() #for some protocols, closing requires actively reading, so we must close here
|
||||||
|
|
||||||
@ -54,9 +55,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
|
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
|
||||||
|
|
||||||
asyncTest "e2e: handle write":
|
asyncTest "e2e: handle write":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async, gcsafe.} =
|
||||||
@ -66,7 +67,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
|
|
||||||
let handlerWait = acceptHandler()
|
let handlerWait = acceptHandler()
|
||||||
|
|
||||||
let transport2 = prov()
|
let transport2 = transpProvider()
|
||||||
let conn = await transport2.dial(transport1.addrs[0])
|
let conn = await transport2.dial(transport1.addrs[0])
|
||||||
var msg = newSeq[byte](6)
|
var msg = newSeq[byte](6)
|
||||||
await conn.readExactly(addr msg[0], 6)
|
await conn.readExactly(addr msg[0], 6)
|
||||||
@ -82,8 +83,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
|
await handlerWait.wait(1.seconds) # when no issues will not wait that long!
|
||||||
|
|
||||||
asyncTest "e2e: handle read":
|
asyncTest "e2e: handle read":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async, gcsafe.} =
|
||||||
@ -95,7 +96,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
|
|
||||||
let handlerWait = acceptHandler()
|
let handlerWait = acceptHandler()
|
||||||
|
|
||||||
let transport2 = prov()
|
let transport2 = transpProvider()
|
||||||
let conn = await transport2.dial(transport1.addrs[0])
|
let conn = await transport2.dial(transport1.addrs[0])
|
||||||
await conn.write("Hello!")
|
await conn.write("Hello!")
|
||||||
|
|
||||||
@ -108,12 +109,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
transport2.stop()))
|
transport2.stop()))
|
||||||
|
|
||||||
asyncTest "e2e: handle dial cancellation":
|
asyncTest "e2e: handle dial cancellation":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
let transport2 = prov()
|
let transport2 = transpProvider()
|
||||||
let cancellation = transport2.dial(transport1.addrs[0])
|
let cancellation = transport2.dial(transport1.addrs[0])
|
||||||
|
|
||||||
await cancellation.cancelAndWait()
|
await cancellation.cancelAndWait()
|
||||||
@ -125,9 +126,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
transport2.stop()))
|
transport2.stop()))
|
||||||
|
|
||||||
asyncTest "e2e: handle accept cancellation":
|
asyncTest "e2e: handle accept cancellation":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
let acceptHandler = transport1.accept()
|
let acceptHandler = transport1.accept()
|
||||||
@ -141,11 +142,11 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
# this randomly locks the Windows CI job
|
# this randomly locks the Windows CI job
|
||||||
skip()
|
skip()
|
||||||
return
|
return
|
||||||
let addrs = @[MultiAddress.init(ma).tryGet(),
|
let addrs = @[MultiAddress.init(ma1).tryGet(),
|
||||||
MultiAddress.init(ma).tryGet()]
|
MultiAddress.init(if ma2 == "": ma1 else: ma2).tryGet()]
|
||||||
|
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(addrs)
|
await transport1.start(addrs)
|
||||||
|
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async, gcsafe.} =
|
||||||
@ -190,12 +191,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
await transport1.stop()
|
await transport1.stop()
|
||||||
|
|
||||||
asyncTest "e2e: stopping transport kills connections":
|
asyncTest "e2e: stopping transport kills connections":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
|
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
let transport2 = prov()
|
let transport2 = transpProvider()
|
||||||
|
|
||||||
let acceptHandler = transport1.accept()
|
let acceptHandler = transport1.accept()
|
||||||
let conn = await transport2.dial(transport1.addrs[0])
|
let conn = await transport2.dial(transport1.addrs[0])
|
||||||
@ -210,8 +211,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
|
|||||||
check conn.closed()
|
check conn.closed()
|
||||||
|
|
||||||
asyncTest "read or write on closed connection":
|
asyncTest "read or write on closed connection":
|
||||||
let ma = @[MultiAddress.init(ma).tryGet()]
|
let ma = @[MultiAddress.init(ma1).tryGet()]
|
||||||
let transport1 = prov()
|
let transport1 = transpProvider()
|
||||||
await transport1.start(ma)
|
await transport1.start(ma)
|
||||||
|
|
||||||
proc acceptHandler() {.async, gcsafe.} =
|
proc acceptHandler() {.async, gcsafe.} =
|
||||||
|
5
tests/config.nims
Normal file
5
tests/config.nims
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
import ../config.nims
|
||||||
|
|
||||||
|
--threads:on
|
||||||
|
--d:metrics
|
||||||
|
--d:withoutPCRE
|
@ -105,16 +105,15 @@ proc bridgedConnections*: (Connection, Connection) =
|
|||||||
return (connA, connB)
|
return (connA, connB)
|
||||||
|
|
||||||
|
|
||||||
proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect].} ): Future[bool] {.async, gcsafe.} =
|
proc checkExpiringInternal(cond: proc(): bool {.raises: [Defect], gcsafe.} ): Future[bool] {.async, gcsafe.} =
|
||||||
{.gcsafe.}:
|
let start = Moment.now()
|
||||||
let start = Moment.now()
|
while true:
|
||||||
while true:
|
if Moment.now() > (start + chronos.seconds(5)):
|
||||||
if Moment.now() > (start + chronos.seconds(5)):
|
return false
|
||||||
return false
|
elif cond():
|
||||||
elif cond():
|
return true
|
||||||
return true
|
else:
|
||||||
else:
|
await sleepAsync(1.millis)
|
||||||
await sleepAsync(1.millis)
|
|
||||||
|
|
||||||
template checkExpiring*(code: untyped): untyped =
|
template checkExpiring*(code: untyped): untyped =
|
||||||
checkExpiringInternal(proc(): bool = code)
|
check await checkExpiringInternal(proc(): bool = code)
|
||||||
|
@ -1 +0,0 @@
|
|||||||
--threads:on
|
|
@ -351,7 +351,7 @@ suite "FloodSub":
|
|||||||
check (await smallNode[0].publish("foo", smallMessage1)) > 0
|
check (await smallNode[0].publish("foo", smallMessage1)) > 0
|
||||||
check (await bigNode[0].publish("foo", smallMessage2)) > 0
|
check (await bigNode[0].publish("foo", smallMessage2)) > 0
|
||||||
|
|
||||||
check (await checkExpiring(messageReceived == 2)) == true
|
checkExpiring: messageReceived == 2
|
||||||
|
|
||||||
check (await smallNode[0].publish("foo", bigMessage)) > 0
|
check (await smallNode[0].publish("foo", bigMessage)) > 0
|
||||||
check (await bigNode[0].publish("foo", bigMessage)) > 0
|
check (await bigNode[0].publish("foo", bigMessage)) > 0
|
||||||
@ -362,3 +362,35 @@ suite "FloodSub":
|
|||||||
)
|
)
|
||||||
|
|
||||||
await allFuturesThrowing(nodesFut)
|
await allFuturesThrowing(nodesFut)
|
||||||
|
|
||||||
|
asyncTest "FloodSub message size validation 2":
|
||||||
|
var messageReceived = 0
|
||||||
|
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
|
||||||
|
inc(messageReceived)
|
||||||
|
|
||||||
|
let
|
||||||
|
bigNode1 = generateNodes(1, maxMessageSize = 20000000)
|
||||||
|
bigNode2 = generateNodes(1, maxMessageSize = 20000000)
|
||||||
|
|
||||||
|
# start switches
|
||||||
|
nodesFut = await allFinished(
|
||||||
|
bigNode1[0].switch.start(),
|
||||||
|
bigNode2[0].switch.start(),
|
||||||
|
)
|
||||||
|
|
||||||
|
await subscribeNodes(bigNode1 & bigNode2)
|
||||||
|
bigNode2[0].subscribe("foo", handler)
|
||||||
|
await waitSub(bigNode1[0], bigNode2[0], "foo")
|
||||||
|
|
||||||
|
let bigMessage = newSeq[byte](19000000)
|
||||||
|
|
||||||
|
check (await bigNode1[0].publish("foo", bigMessage)) > 0
|
||||||
|
|
||||||
|
checkExpiring: messageReceived == 1
|
||||||
|
|
||||||
|
await allFuturesThrowing(
|
||||||
|
bigNode1[0].switch.stop(),
|
||||||
|
bigNode2[0].switch.stop()
|
||||||
|
)
|
||||||
|
|
||||||
|
await allFuturesThrowing(nodesFut)
|
||||||
|
@ -22,10 +22,7 @@ proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
|
|||||||
proc getConn(): Future[Connection] =
|
proc getConn(): Future[Connection] =
|
||||||
p.switch.dial(peerId, GossipSubCodec)
|
p.switch.dial(peerId, GossipSubCodec)
|
||||||
|
|
||||||
proc dropConn(peer: PubSubPeer) =
|
let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
|
||||||
discard # we don't care about it here yet
|
|
||||||
|
|
||||||
let pubSubPeer = PubSubPeer.new(peerId, getConn, dropConn, nil, GossipSubCodec, 1024 * 1024)
|
|
||||||
debug "created new pubsub peer", peerId
|
debug "created new pubsub peer", peerId
|
||||||
|
|
||||||
p.peers[peerId] = pubSubPeer
|
p.peers[peerId] = pubSubPeer
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
|
|
||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import sequtils, options, tables, sets
|
import sequtils, options, tables, sets, sugar
|
||||||
import chronos, stew/byteutils
|
import chronos, stew/byteutils
|
||||||
import chronicles
|
import chronicles
|
||||||
import utils, ../../libp2p/[errors,
|
import utils, ../../libp2p/[errors,
|
||||||
@ -29,46 +29,15 @@ import ../helpers
|
|||||||
|
|
||||||
proc `$`(peer: PubSubPeer): string = shortLog(peer)
|
proc `$`(peer: PubSubPeer): string = shortLog(peer)
|
||||||
|
|
||||||
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
template tryPublish(call: untyped, require: int, wait = 10.milliseconds, timeout = 5.seconds): untyped =
|
||||||
if sender == receiver:
|
|
||||||
return
|
|
||||||
# turn things deterministic
|
|
||||||
# this is for testing purposes only
|
|
||||||
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
|
|
||||||
var ceil = 15
|
|
||||||
let fsub = GossipSub(sender)
|
|
||||||
let ev = newAsyncEvent()
|
|
||||||
fsub.heartbeatEvents.add(ev)
|
|
||||||
|
|
||||||
# await first heartbeat
|
|
||||||
await ev.wait()
|
|
||||||
ev.clear()
|
|
||||||
|
|
||||||
while (not fsub.gossipsub.hasKey(key) or
|
|
||||||
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
|
|
||||||
(not fsub.mesh.hasKey(key) or
|
|
||||||
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
|
|
||||||
(not fsub.fanout.hasKey(key) or
|
|
||||||
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
|
|
||||||
trace "waitSub sleeping..."
|
|
||||||
|
|
||||||
# await more heartbeats
|
|
||||||
await ev.wait()
|
|
||||||
ev.clear()
|
|
||||||
|
|
||||||
dec ceil
|
|
||||||
doAssert(ceil > 0, "waitSub timeout!")
|
|
||||||
|
|
||||||
template tryPublish(call: untyped, require: int, wait: Duration = 1.seconds, times: int = 10): untyped =
|
|
||||||
var
|
var
|
||||||
limit = times
|
expiration = Moment.now() + timeout
|
||||||
pubs = 0
|
pubs = 0
|
||||||
while pubs < require and limit > 0:
|
while pubs < require and Moment.now() < expiration:
|
||||||
pubs = pubs + call
|
pubs = pubs + call
|
||||||
await sleepAsync(wait)
|
await sleepAsync(wait)
|
||||||
limit.dec()
|
|
||||||
if limit == 0:
|
doAssert pubs >= require, "Failed to publish!"
|
||||||
doAssert(false, "Failed to publish!")
|
|
||||||
|
|
||||||
suite "GossipSub":
|
suite "GossipSub":
|
||||||
teardown:
|
teardown:
|
||||||
@ -343,14 +312,13 @@ suite "GossipSub":
|
|||||||
await subscribeNodes(nodes)
|
await subscribeNodes(nodes)
|
||||||
|
|
||||||
nodes[1].subscribe("foobar", handler)
|
nodes[1].subscribe("foobar", handler)
|
||||||
await sleepAsync(10.seconds)
|
|
||||||
|
|
||||||
let gossip1 = GossipSub(nodes[0])
|
let gossip1 = GossipSub(nodes[0])
|
||||||
let gossip2 = GossipSub(nodes[1])
|
let gossip2 = GossipSub(nodes[1])
|
||||||
|
|
||||||
check:
|
checkExpiring:
|
||||||
"foobar" in gossip2.topics
|
"foobar" in gossip2.topics and
|
||||||
"foobar" in gossip1.gossipsub
|
"foobar" in gossip1.gossipsub and
|
||||||
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
|
||||||
|
|
||||||
await allFuturesThrowing(
|
await allFuturesThrowing(
|
||||||
@ -494,7 +462,7 @@ suite "GossipSub":
|
|||||||
nodes[0].unsubscribe("foobar", handler)
|
nodes[0].unsubscribe("foobar", handler)
|
||||||
|
|
||||||
let gsNode = GossipSub(nodes[1])
|
let gsNode = GossipSub(nodes[1])
|
||||||
check await checkExpiring(gsNode.mesh.getOrDefault("foobar").len == 0)
|
checkExpiring: gsNode.mesh.getOrDefault("foobar").len == 0
|
||||||
|
|
||||||
nodes[0].subscribe("foobar", handler)
|
nodes[0].subscribe("foobar", handler)
|
||||||
|
|
||||||
@ -613,7 +581,7 @@ suite "GossipSub":
|
|||||||
gossip1.seen = TimedCache[MessageId].init()
|
gossip1.seen = TimedCache[MessageId].init()
|
||||||
gossip3.seen = TimedCache[MessageId].init()
|
gossip3.seen = TimedCache[MessageId].init()
|
||||||
let msgId = toSeq(gossip2.validationSeen.keys)[0]
|
let msgId = toSeq(gossip2.validationSeen.keys)[0]
|
||||||
check await checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
|
checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
|
||||||
result = ValidationResult.Accept
|
result = ValidationResult.Accept
|
||||||
bFinished.complete()
|
bFinished.complete()
|
||||||
|
|
||||||
@ -701,14 +669,14 @@ suite "GossipSub":
|
|||||||
seenFut.complete()
|
seenFut.complete()
|
||||||
|
|
||||||
dialer.subscribe("foobar", handler)
|
dialer.subscribe("foobar", handler)
|
||||||
await waitSub(nodes[0], dialer, "foobar")
|
await waitSubGraph(nodes, "foobar")
|
||||||
|
|
||||||
tryPublish await wait(nodes[0].publish("foobar",
|
tryPublish await wait(nodes[0].publish("foobar",
|
||||||
toBytes("from node " &
|
toBytes("from node " &
|
||||||
$nodes[0].peerInfo.peerId)),
|
$nodes[0].peerInfo.peerId)),
|
||||||
1.minutes), 1, 5.seconds
|
1.minutes), 1
|
||||||
|
|
||||||
await wait(seenFut, 2.minutes)
|
await wait(seenFut, 1.minutes)
|
||||||
check: seen.len >= runs
|
check: seen.len >= runs
|
||||||
for k, v in seen.pairs:
|
for k, v in seen.pairs:
|
||||||
check: v >= 1
|
check: v >= 1
|
||||||
@ -737,10 +705,11 @@ suite "GossipSub":
|
|||||||
|
|
||||||
var seen: Table[string, int]
|
var seen: Table[string, int]
|
||||||
var seenFut = newFuture[void]()
|
var seenFut = newFuture[void]()
|
||||||
|
|
||||||
for i in 0..<nodes.len:
|
for i in 0..<nodes.len:
|
||||||
let dialer = nodes[i]
|
let dialer = nodes[i]
|
||||||
var handler: TopicHandler
|
var handler: TopicHandler
|
||||||
closureScope:
|
capture dialer, i:
|
||||||
var peerName = $dialer.peerInfo.peerId
|
var peerName = $dialer.peerInfo.peerId
|
||||||
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
|
||||||
if peerName notin seen:
|
if peerName notin seen:
|
||||||
@ -751,14 +720,14 @@ suite "GossipSub":
|
|||||||
seenFut.complete()
|
seenFut.complete()
|
||||||
|
|
||||||
dialer.subscribe("foobar", handler)
|
dialer.subscribe("foobar", handler)
|
||||||
await waitSub(nodes[0], dialer, "foobar")
|
|
||||||
|
|
||||||
|
await waitSubGraph(nodes, "foobar")
|
||||||
tryPublish await wait(nodes[0].publish("foobar",
|
tryPublish await wait(nodes[0].publish("foobar",
|
||||||
toBytes("from node " &
|
toBytes("from node " &
|
||||||
$nodes[0].peerInfo.peerId)),
|
$nodes[0].peerInfo.peerId)),
|
||||||
1.minutes), 1, 5.seconds
|
1.minutes), 1
|
||||||
|
|
||||||
await wait(seenFut, 5.minutes)
|
await wait(seenFut, 60.seconds)
|
||||||
check: seen.len >= runs
|
check: seen.len >= runs
|
||||||
for k, v in seen.pairs:
|
for k, v in seen.pairs:
|
||||||
check: v >= 1
|
check: v >= 1
|
||||||
|
@ -10,8 +10,7 @@
|
|||||||
{.used.}
|
{.used.}
|
||||||
|
|
||||||
import sequtils, options, tables, sets
|
import sequtils, options, tables, sets
|
||||||
import chronos, stew/byteutils
|
import chronos, stew/byteutils, chronicles
|
||||||
import chronicles
|
|
||||||
import utils, ../../libp2p/[errors,
|
import utils, ../../libp2p/[errors,
|
||||||
peerid,
|
peerid,
|
||||||
peerinfo,
|
peerinfo,
|
||||||
@ -25,46 +24,15 @@ import utils, ../../libp2p/[errors,
|
|||||||
protocols/pubsub/rpc/messages]
|
protocols/pubsub/rpc/messages]
|
||||||
import ../helpers
|
import ../helpers
|
||||||
|
|
||||||
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
template tryPublish(call: untyped, require: int, wait = 10.milliseconds, timeout = 10.seconds): untyped =
|
||||||
if sender == receiver:
|
|
||||||
return
|
|
||||||
# turn things deterministic
|
|
||||||
# this is for testing purposes only
|
|
||||||
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
|
|
||||||
var ceil = 15
|
|
||||||
let fsub = GossipSub(sender)
|
|
||||||
let ev = newAsyncEvent()
|
|
||||||
fsub.heartbeatEvents.add(ev)
|
|
||||||
|
|
||||||
# await first heartbeat
|
|
||||||
await ev.wait()
|
|
||||||
ev.clear()
|
|
||||||
|
|
||||||
while (not fsub.gossipsub.hasKey(key) or
|
|
||||||
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
|
|
||||||
(not fsub.mesh.hasKey(key) or
|
|
||||||
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
|
|
||||||
(not fsub.fanout.hasKey(key) or
|
|
||||||
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
|
|
||||||
trace "waitSub sleeping..."
|
|
||||||
|
|
||||||
# await more heartbeats
|
|
||||||
await ev.wait()
|
|
||||||
ev.clear()
|
|
||||||
|
|
||||||
dec ceil
|
|
||||||
doAssert(ceil > 0, "waitSub timeout!")
|
|
||||||
|
|
||||||
template tryPublish(call: untyped, require: int, wait: Duration = 1.seconds, times: int = 10): untyped =
|
|
||||||
var
|
var
|
||||||
limit = times
|
expiration = Moment.now() + timeout
|
||||||
pubs = 0
|
pubs = 0
|
||||||
while pubs < require and limit > 0:
|
while pubs < require and Moment.now() < expiration:
|
||||||
pubs = pubs + call
|
pubs = pubs + call
|
||||||
await sleepAsync(wait)
|
await sleepAsync(wait)
|
||||||
limit.dec()
|
|
||||||
if limit == 0:
|
doAssert pubs >= require, "Failed to publish!"
|
||||||
doAssert(false, "Failed to publish!")
|
|
||||||
|
|
||||||
suite "GossipSub":
|
suite "GossipSub":
|
||||||
teardown:
|
teardown:
|
||||||
@ -280,7 +248,7 @@ suite "GossipSub":
|
|||||||
|
|
||||||
await allFuturesThrowing(nodesFut.concat())
|
await allFuturesThrowing(nodesFut.concat())
|
||||||
|
|
||||||
asyncTest "GossipsSub peers disconnections mechanics":
|
asyncTest "GossipSub peers disconnections mechanics":
|
||||||
var runs = 10
|
var runs = 10
|
||||||
|
|
||||||
let
|
let
|
||||||
@ -305,7 +273,8 @@ suite "GossipSub":
|
|||||||
seenFut.complete()
|
seenFut.complete()
|
||||||
|
|
||||||
dialer.subscribe("foobar", handler)
|
dialer.subscribe("foobar", handler)
|
||||||
await waitSub(nodes[0], dialer, "foobar")
|
|
||||||
|
await waitSubGraph(nodes, "foobar")
|
||||||
|
|
||||||
# ensure peer stats are stored properly and kept properly
|
# ensure peer stats are stored properly and kept properly
|
||||||
check:
|
check:
|
||||||
@ -314,7 +283,7 @@ suite "GossipSub":
|
|||||||
tryPublish await wait(nodes[0].publish("foobar",
|
tryPublish await wait(nodes[0].publish("foobar",
|
||||||
toBytes("from node " &
|
toBytes("from node " &
|
||||||
$nodes[0].peerInfo.peerId)),
|
$nodes[0].peerInfo.peerId)),
|
||||||
1.minutes), 1, 5.seconds
|
1.minutes), 1, 5.seconds, 3.minutes
|
||||||
|
|
||||||
await wait(seenFut, 5.minutes)
|
await wait(seenFut, 5.minutes)
|
||||||
check: seen.len >= runs
|
check: seen.len >= runs
|
||||||
@ -337,11 +306,9 @@ suite "GossipSub":
|
|||||||
# Waiting 2 heartbeats
|
# Waiting 2 heartbeats
|
||||||
|
|
||||||
for _ in 0..1:
|
for _ in 0..1:
|
||||||
for i in 0..<runs:
|
let evnt = newAsyncEvent()
|
||||||
if i mod 3 == 0:
|
GossipSub(nodes[0]).heartbeatEvents &= evnt
|
||||||
let evnt = newAsyncEvent()
|
await evnt.wait()
|
||||||
GossipSub(nodes[i]).heartbeatEvents &= evnt
|
|
||||||
await evnt.wait()
|
|
||||||
|
|
||||||
# ensure peer stats are stored properly and kept properly
|
# ensure peer stats are stored properly and kept properly
|
||||||
check:
|
check:
|
||||||
@ -359,11 +326,9 @@ suite "GossipSub":
|
|||||||
# Waiting 2 heartbeats
|
# Waiting 2 heartbeats
|
||||||
|
|
||||||
for _ in 0..1:
|
for _ in 0..1:
|
||||||
for i in 0..<runs:
|
let evnt = newAsyncEvent()
|
||||||
if i mod 3 == 0:
|
GossipSub(nodes[0]).heartbeatEvents &= evnt
|
||||||
let evnt = newAsyncEvent()
|
await evnt.wait()
|
||||||
GossipSub(nodes[i]).heartbeatEvents &= evnt
|
|
||||||
await evnt.wait()
|
|
||||||
|
|
||||||
# ensure peer stats are stored properly and kept properly
|
# ensure peer stats are stored properly and kept properly
|
||||||
check:
|
check:
|
||||||
|
@ -4,7 +4,7 @@ const
|
|||||||
libp2p_pubsub_verify {.booldefine.} = true
|
libp2p_pubsub_verify {.booldefine.} = true
|
||||||
libp2p_pubsub_anonymize {.booldefine.} = false
|
libp2p_pubsub_anonymize {.booldefine.} = false
|
||||||
|
|
||||||
import hashes, random, tables
|
import hashes, random, tables, sets, sequtils
|
||||||
import chronos, stew/[byteutils, results]
|
import chronos, stew/[byteutils, results]
|
||||||
import ../../libp2p/[builders,
|
import ../../libp2p/[builders,
|
||||||
protocols/pubsub/errors,
|
protocols/pubsub/errors,
|
||||||
@ -13,6 +13,7 @@ import ../../libp2p/[builders,
|
|||||||
protocols/pubsub/floodsub,
|
protocols/pubsub/floodsub,
|
||||||
protocols/pubsub/rpc/messages,
|
protocols/pubsub/rpc/messages,
|
||||||
protocols/secure/secure]
|
protocols/secure/secure]
|
||||||
|
import chronicles
|
||||||
|
|
||||||
export builders
|
export builders
|
||||||
|
|
||||||
@ -102,3 +103,43 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
|
|||||||
if dialer.peerInfo.peerId != node.peerInfo.peerId:
|
if dialer.peerInfo.peerId != node.peerInfo.peerId:
|
||||||
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
|
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
|
||||||
dialed.add(node.peerInfo.peerId)
|
dialed.add(node.peerInfo.peerId)
|
||||||
|
|
||||||
|
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
|
||||||
|
if sender == receiver:
|
||||||
|
return
|
||||||
|
let timeout = Moment.now() + 5.seconds
|
||||||
|
let fsub = GossipSub(sender)
|
||||||
|
|
||||||
|
# this is for testing purposes only
|
||||||
|
# peers can be inside `mesh` and `fanout`, not just `gossipsub`
|
||||||
|
while (not fsub.gossipsub.hasKey(key) or
|
||||||
|
not fsub.gossipsub.hasPeerId(key, receiver.peerInfo.peerId)) and
|
||||||
|
(not fsub.mesh.hasKey(key) or
|
||||||
|
not fsub.mesh.hasPeerId(key, receiver.peerInfo.peerId)) and
|
||||||
|
(not fsub.fanout.hasKey(key) or
|
||||||
|
not fsub.fanout.hasPeerId(key , receiver.peerInfo.peerId)):
|
||||||
|
trace "waitSub sleeping..."
|
||||||
|
|
||||||
|
# await
|
||||||
|
await sleepAsync(5.milliseconds)
|
||||||
|
doAssert Moment.now() < timeout, "waitSub timeout!"
|
||||||
|
|
||||||
|
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
|
||||||
|
let timeout = Moment.now() + 5.seconds
|
||||||
|
while true:
|
||||||
|
var
|
||||||
|
nodesMesh: Table[PeerId, seq[PeerId]]
|
||||||
|
seen: HashSet[PeerId]
|
||||||
|
for n in nodes:
|
||||||
|
nodesMesh[n.peerInfo.peerId] = toSeq(GossipSub(n).mesh.getOrDefault(key).items()).mapIt(it.peerId)
|
||||||
|
proc explore(p: PeerId) =
|
||||||
|
if p in seen: return
|
||||||
|
seen.incl(p)
|
||||||
|
for peer in nodesMesh.getOrDefault(p):
|
||||||
|
explore(peer)
|
||||||
|
explore(nodes[0].peerInfo.peerId)
|
||||||
|
if seen.len == nodes.len: return
|
||||||
|
trace "waitSubGraph sleeping..."
|
||||||
|
|
||||||
|
await sleepAsync(5.milliseconds)
|
||||||
|
doAssert Moment.now() < timeout, "waitSubGraph timeout!"
|
||||||
|
44
tests/stubs/autonatstub.nim
Normal file
44
tests/stubs/autonatstub.nim
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
{.used.}
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import chronos
|
||||||
|
import ../../libp2p/protocols/connectivity/autonat
|
||||||
|
import ../../libp2p/peerid
|
||||||
|
import ../../libp2p/multiaddress
|
||||||
|
|
||||||
|
type
|
||||||
|
AutonatStub* = ref object of Autonat
|
||||||
|
answer*: Answer
|
||||||
|
dials: int
|
||||||
|
expectedDials: int
|
||||||
|
finished*: Future[void]
|
||||||
|
|
||||||
|
Answer* = enum
|
||||||
|
Reachable,
|
||||||
|
NotReachable,
|
||||||
|
Unknown
|
||||||
|
|
||||||
|
proc new*(T: typedesc[AutonatStub], expectedDials: int): T =
|
||||||
|
return T(dials: 0, expectedDials: expectedDials, finished: newFuture[void]())
|
||||||
|
|
||||||
|
method dialMe*(
|
||||||
|
self: AutonatStub,
|
||||||
|
pid: PeerId,
|
||||||
|
addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
|
||||||
|
Future[MultiAddress] {.async.} =
|
||||||
|
|
||||||
|
self.dials += 1
|
||||||
|
|
||||||
|
if self.dials == self.expectedDials:
|
||||||
|
self.finished.complete()
|
||||||
|
case self.answer:
|
||||||
|
of Reachable:
|
||||||
|
return MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
|
||||||
|
of NotReachable:
|
||||||
|
raise newException(AutonatUnreachableError, "")
|
||||||
|
of Unknown:
|
||||||
|
raise newException(AutonatError, "")
|
86
tests/stubs/torstub.nim
Normal file
86
tests/stubs/torstub.nim
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
{.used.}
|
||||||
|
|
||||||
|
when (NimMajor, NimMinor) < (1, 4):
|
||||||
|
{.push raises: [Defect].}
|
||||||
|
else:
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import tables
|
||||||
|
import chronos, stew/[byteutils, endians2, shims/net]
|
||||||
|
import ../../libp2p/[stream/connection,
|
||||||
|
protocols/connectivity/relay/utils,
|
||||||
|
transports/tcptransport,
|
||||||
|
transports/tortransport,
|
||||||
|
upgrademngrs/upgrade,
|
||||||
|
multiaddress,
|
||||||
|
errors,
|
||||||
|
builders]
|
||||||
|
|
||||||
|
type
|
||||||
|
TorServerStub* = ref object of RootObj
|
||||||
|
tcpTransport: TcpTransport
|
||||||
|
addrTable: Table[string, string]
|
||||||
|
|
||||||
|
proc new*(
|
||||||
|
T: typedesc[TorServerStub]): T {.public.} =
|
||||||
|
|
||||||
|
T(
|
||||||
|
tcpTransport: TcpTransport.new(flags = {ReuseAddr}, upgrade = Upgrade()),
|
||||||
|
addrTable: initTable[string, string]())
|
||||||
|
|
||||||
|
proc registerAddr*(self: TorServerStub, key: string, val: string) =
|
||||||
|
self.addrTable[key] = val
|
||||||
|
|
||||||
|
proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
|
||||||
|
let ma = @[MultiAddress.init(address).tryGet()]
|
||||||
|
|
||||||
|
await self.tcpTransport.start(ma)
|
||||||
|
|
||||||
|
var msg = newSeq[byte](3)
|
||||||
|
while self.tcpTransport.running:
|
||||||
|
let connSrc = await self.tcpTransport.accept()
|
||||||
|
await connSrc.readExactly(addr msg[0], 3)
|
||||||
|
|
||||||
|
await connSrc.write(@[05'u8, 00])
|
||||||
|
|
||||||
|
msg = newSeq[byte](4)
|
||||||
|
await connSrc.readExactly(addr msg[0], 4)
|
||||||
|
let atyp = msg[3]
|
||||||
|
let address = case atyp:
|
||||||
|
of Socks5AddressType.IPv4.byte:
|
||||||
|
let n = 4 + 2 # +2 bytes for the port
|
||||||
|
msg = newSeq[byte](n)
|
||||||
|
await connSrc.readExactly(addr msg[0], n)
|
||||||
|
var ip: array[4, byte]
|
||||||
|
for i, e in msg[0..^3]:
|
||||||
|
ip[i] = e
|
||||||
|
$(ipv4(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
|
||||||
|
of Socks5AddressType.IPv6.byte:
|
||||||
|
let n = 16 + 2 # +2 bytes for the port
|
||||||
|
msg = newSeq[byte](n) # +2 bytes for the port
|
||||||
|
await connSrc.readExactly(addr msg[0], n)
|
||||||
|
var ip: array[16, byte]
|
||||||
|
for i, e in msg[0..^3]:
|
||||||
|
ip[i] = e
|
||||||
|
$(ipv6(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
|
||||||
|
of Socks5AddressType.FQDN.byte:
|
||||||
|
await connSrc.readExactly(addr msg[0], 1)
|
||||||
|
let n = int(uint8.fromBytes(msg[0..0])) + 2 # +2 bytes for the port
|
||||||
|
msg = newSeq[byte](n)
|
||||||
|
await connSrc.readExactly(addr msg[0], n)
|
||||||
|
string.fromBytes(msg[0..^3]) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
|
||||||
|
else:
|
||||||
|
raise newException(LPError, "Address not supported")
|
||||||
|
|
||||||
|
let tcpIpAddr = self.addrTable[$(address)]
|
||||||
|
|
||||||
|
await connSrc.write(@[05'u8, 00, 00, 01, 00, 00, 00, 00, 00, 00])
|
||||||
|
|
||||||
|
let connDst = await self.tcpTransport.dial("", MultiAddress.init(tcpIpAddr).tryGet())
|
||||||
|
|
||||||
|
await bridge(connSrc, connDst)
|
||||||
|
await allFutures(connSrc.close(), connDst.close())
|
||||||
|
|
||||||
|
|
||||||
|
proc stop*(self: TorServerStub) {.async.} =
|
||||||
|
await self.tcpTransport.stop()
|
89
tests/testautonat.nim
Normal file
89
tests/testautonat.nim
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
import std/options
|
||||||
|
import chronos
|
||||||
|
import
|
||||||
|
../libp2p/[
|
||||||
|
transports/tcptransport,
|
||||||
|
upgrademngrs/upgrade,
|
||||||
|
builders,
|
||||||
|
protocols/connectivity/autonat
|
||||||
|
],
|
||||||
|
./helpers
|
||||||
|
|
||||||
|
proc createAutonatSwitch(): Switch =
|
||||||
|
result = SwitchBuilder.new()
|
||||||
|
.withRng(newRng())
|
||||||
|
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||||
|
.withTcpTransport()
|
||||||
|
.withMplex()
|
||||||
|
.withAutonat()
|
||||||
|
.withNoise()
|
||||||
|
.build()
|
||||||
|
|
||||||
|
proc makeAutonatServicePrivate(): Switch =
|
||||||
|
var autonatProtocol = new LPProtocol
|
||||||
|
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
|
||||||
|
discard await conn.readLp(1024)
|
||||||
|
await conn.writeLp(AutonatDialResponse(
|
||||||
|
status: DialError,
|
||||||
|
text: some("dial failed"),
|
||||||
|
ma: none(MultiAddress)).encode().buffer)
|
||||||
|
await conn.close()
|
||||||
|
autonatProtocol.codec = AutonatCodec
|
||||||
|
result = newStandardSwitch()
|
||||||
|
result.mount(autonatProtocol)
|
||||||
|
|
||||||
|
suite "Autonat":
|
||||||
|
teardown:
|
||||||
|
checkTrackers()
|
||||||
|
|
||||||
|
asyncTest "dialMe returns public address":
|
||||||
|
let
|
||||||
|
src = newStandardSwitch()
|
||||||
|
dst = createAutonatSwitch()
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
await src.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
|
||||||
|
let ma = await Autonat.new(src).dialMe(dst.peerInfo.peerId, dst.peerInfo.addrs)
|
||||||
|
check ma in src.peerInfo.addrs
|
||||||
|
await allFutures(src.stop(), dst.stop())
|
||||||
|
|
||||||
|
asyncTest "dialMe handles dial error msg":
|
||||||
|
let
|
||||||
|
src = newStandardSwitch()
|
||||||
|
dst = makeAutonatServicePrivate()
|
||||||
|
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
|
||||||
|
await src.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
|
||||||
|
expect AutonatUnreachableError:
|
||||||
|
discard await Autonat.new(src).dialMe(dst.peerInfo.peerId, dst.peerInfo.addrs)
|
||||||
|
await allFutures(src.stop(), dst.stop())
|
||||||
|
|
||||||
|
asyncTest "Timeout is triggered in autonat handle":
|
||||||
|
let
|
||||||
|
src = newStandardSwitch()
|
||||||
|
dst = newStandardSwitch()
|
||||||
|
autonat = Autonat.new(dst, dialTimeout = 1.seconds)
|
||||||
|
doesNothingListener = TcpTransport.new(upgrade = Upgrade())
|
||||||
|
|
||||||
|
dst.mount(autonat)
|
||||||
|
await src.start()
|
||||||
|
await dst.start()
|
||||||
|
await doesNothingListener.start(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
|
||||||
|
|
||||||
|
await src.connect(dst.peerInfo.peerId, dst.peerInfo.addrs)
|
||||||
|
let conn = await src.dial(dst.peerInfo.peerId, @[AutonatCodec])
|
||||||
|
let buffer = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||||
|
id: some(src.peerInfo.peerId),
|
||||||
|
# we ask to be dialed in the does nothing listener instead
|
||||||
|
addrs: doesNothingListener.addrs
|
||||||
|
))).encode().buffer
|
||||||
|
await conn.writeLp(buffer)
|
||||||
|
let response = AutonatMsg.decode(await conn.readLp(1024)).get().response.get()
|
||||||
|
check:
|
||||||
|
response.status == DialError
|
||||||
|
response.text.get() == "Timeout exceeded!"
|
||||||
|
response.ma.isNone()
|
||||||
|
await allFutures(doesNothingListener.stop(), src.stop(), dst.stop())
|
251
tests/testautonatservice.nim
Normal file
251
tests/testautonatservice.nim
Normal file
@ -0,0 +1,251 @@
|
|||||||
|
# Nim-LibP2P
|
||||||
|
# Copyright (c) 2022 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||||
|
# at your option.
|
||||||
|
# This file may not be copied, modified, or distributed except according to
|
||||||
|
# those terms.
|
||||||
|
|
||||||
|
import std/options
|
||||||
|
import chronos, metrics
|
||||||
|
import unittest2
|
||||||
|
import ../libp2p/[builders,
|
||||||
|
switch,
|
||||||
|
services/autonatservice,
|
||||||
|
protocols/connectivity/autonat]
|
||||||
|
import ./helpers
|
||||||
|
import stubs/autonatstub
|
||||||
|
|
||||||
|
proc createSwitch(autonatSvc: Service = nil, withAutonat = true): Switch =
|
||||||
|
var builder = SwitchBuilder.new()
|
||||||
|
.withRng(newRng())
|
||||||
|
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
|
||||||
|
.withTcpTransport()
|
||||||
|
.withMplex()
|
||||||
|
.withNoise()
|
||||||
|
|
||||||
|
if withAutonat:
|
||||||
|
builder = builder.withAutonat()
|
||||||
|
|
||||||
|
if autonatSvc != nil:
|
||||||
|
builder = builder.withServices(@[autonatSvc])
|
||||||
|
|
||||||
|
return builder.build()
|
||||||
|
|
||||||
|
suite "Autonat Service":
|
||||||
|
teardown:
|
||||||
|
checkTrackers()
|
||||||
|
|
||||||
|
asyncTest "Peer must be not reachable":
|
||||||
|
|
||||||
|
let autonatStub = AutonatStub.new(expectedDials = 3)
|
||||||
|
autonatStub.answer = NotReachable
|
||||||
|
|
||||||
|
let autonatService = AutonatService.new(autonatStub, newRng())
|
||||||
|
|
||||||
|
let switch1 = createSwitch(autonatService)
|
||||||
|
let switch2 = createSwitch()
|
||||||
|
let switch3 = createSwitch()
|
||||||
|
let switch4 = createSwitch()
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Unknown
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
await switch3.start()
|
||||||
|
await switch4.start()
|
||||||
|
|
||||||
|
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
|
||||||
|
|
||||||
|
await autonatStub.finished
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.NotReachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3
|
||||||
|
|
||||||
|
await allFuturesThrowing(
|
||||||
|
switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
|
||||||
|
|
||||||
|
asyncTest "Peer must be reachable":
|
||||||
|
|
||||||
|
let autonat = Autonat.new(switch = nil)
|
||||||
|
|
||||||
|
let autonatService = AutonatService.new(autonat, newRng(), some(1.seconds))
|
||||||
|
|
||||||
|
let switch1 = createSwitch(autonatService)
|
||||||
|
let switch2 = createSwitch()
|
||||||
|
let switch3 = createSwitch()
|
||||||
|
let switch4 = createSwitch()
|
||||||
|
|
||||||
|
let awaiter = newFuture[void]()
|
||||||
|
|
||||||
|
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
|
||||||
|
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
|
||||||
|
if not awaiter.finished:
|
||||||
|
awaiter.complete()
|
||||||
|
|
||||||
|
autonat.switch = switch1
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Unknown
|
||||||
|
|
||||||
|
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
await switch3.start()
|
||||||
|
await switch4.start()
|
||||||
|
|
||||||
|
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
|
||||||
|
|
||||||
|
await awaiter
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Reachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3
|
||||||
|
|
||||||
|
await allFuturesThrowing(
|
||||||
|
switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
|
||||||
|
|
||||||
|
asyncTest "Peer must be not reachable and then reachable":
|
||||||
|
|
||||||
|
let autonatStub = AutonatStub.new(expectedDials = 6)
|
||||||
|
autonatStub.answer = NotReachable
|
||||||
|
|
||||||
|
let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds))
|
||||||
|
|
||||||
|
let switch1 = createSwitch(autonatService)
|
||||||
|
let switch2 = createSwitch()
|
||||||
|
let switch3 = createSwitch()
|
||||||
|
let switch4 = createSwitch()
|
||||||
|
|
||||||
|
let awaiter = newFuture[void]()
|
||||||
|
|
||||||
|
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
|
||||||
|
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
|
||||||
|
if not awaiter.finished:
|
||||||
|
autonatStub.answer = Reachable
|
||||||
|
awaiter.complete()
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Unknown
|
||||||
|
|
||||||
|
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
await switch3.start()
|
||||||
|
await switch4.start()
|
||||||
|
|
||||||
|
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
|
||||||
|
|
||||||
|
await awaiter
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.NotReachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3
|
||||||
|
|
||||||
|
await autonatStub.finished
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Reachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3
|
||||||
|
|
||||||
|
await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
|
||||||
|
|
||||||
|
asyncTest "Peer must be reachable when one connected peer has autonat disabled":
|
||||||
|
let autonat = Autonat.new(switch = nil)
|
||||||
|
|
||||||
|
let autonatService = AutonatService.new(autonat, newRng(), some(1.seconds), maxQueueSize = 2)
|
||||||
|
|
||||||
|
let switch1 = createSwitch(autonatService)
|
||||||
|
let switch2 = createSwitch(withAutonat = false)
|
||||||
|
let switch3 = createSwitch()
|
||||||
|
let switch4 = createSwitch()
|
||||||
|
|
||||||
|
let awaiter = newFuture[void]()
|
||||||
|
|
||||||
|
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
|
||||||
|
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
|
||||||
|
if not awaiter.finished:
|
||||||
|
awaiter.complete()
|
||||||
|
|
||||||
|
autonat.switch = switch1
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Unknown
|
||||||
|
|
||||||
|
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
await switch3.start()
|
||||||
|
await switch4.start()
|
||||||
|
|
||||||
|
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
|
||||||
|
|
||||||
|
await awaiter
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Reachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 1
|
||||||
|
|
||||||
|
await allFuturesThrowing(
|
||||||
|
switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
|
||||||
|
|
||||||
|
asyncTest "Unknown answers must be ignored":
|
||||||
|
|
||||||
|
let autonatStub = AutonatStub.new(expectedDials = 6)
|
||||||
|
autonatStub.answer = NotReachable
|
||||||
|
|
||||||
|
let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds), maxQueueSize = 3)
|
||||||
|
|
||||||
|
let switch1 = createSwitch(autonatService)
|
||||||
|
let switch2 = createSwitch()
|
||||||
|
let switch3 = createSwitch()
|
||||||
|
let switch4 = createSwitch()
|
||||||
|
|
||||||
|
let awaiter = newFuture[void]()
|
||||||
|
|
||||||
|
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
|
||||||
|
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
|
||||||
|
if not awaiter.finished:
|
||||||
|
autonatStub.answer = Unknown
|
||||||
|
awaiter.complete()
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.Unknown
|
||||||
|
|
||||||
|
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)
|
||||||
|
|
||||||
|
await switch1.start()
|
||||||
|
await switch2.start()
|
||||||
|
await switch3.start()
|
||||||
|
await switch4.start()
|
||||||
|
|
||||||
|
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
|
||||||
|
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
|
||||||
|
|
||||||
|
await awaiter
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.NotReachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 1/3
|
||||||
|
|
||||||
|
await autonatStub.finished
|
||||||
|
|
||||||
|
check autonatService.networkReachability() == NetworkReachability.NotReachable
|
||||||
|
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 1/3
|
||||||
|
|
||||||
|
await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
|
||||||
|
|
||||||
|
asyncTest "Calling setup and stop twice must work":
|
||||||
|
|
||||||
|
let switch = createSwitch()
|
||||||
|
let autonatService = AutonatService.new(AutonatStub.new(expectedDials = 0), newRng(), some(1.seconds))
|
||||||
|
|
||||||
|
check (await autonatService.setup(switch)) == true
|
||||||
|
check (await autonatService.setup(switch)) == false
|
||||||
|
|
||||||
|
check (await autonatService.stop(switch)) == true
|
||||||
|
check (await autonatService.stop(switch)) == false
|
||||||
|
|
||||||
|
await allFuturesThrowing(switch.stop())
|
@ -1,4 +1,5 @@
|
|||||||
import sequtils
|
import sequtils
|
||||||
|
import stew/results
|
||||||
import chronos
|
import chronos
|
||||||
import ../libp2p/[connmanager,
|
import ../libp2p/[connmanager,
|
||||||
stream/connection,
|
stream/connection,
|
||||||
@ -9,6 +10,9 @@ import ../libp2p/[connmanager,
|
|||||||
|
|
||||||
import helpers
|
import helpers
|
||||||
|
|
||||||
|
proc getConnection(peerId: PeerId, dir: Direction = Direction.In): Connection =
|
||||||
|
return Connection.new(peerId, dir, Opt.none(MultiAddress))
|
||||||
|
|
||||||
type
|
type
|
||||||
TestMuxer = ref object of Muxer
|
TestMuxer = ref object of Muxer
|
||||||
peerId: PeerId
|
peerId: PeerId
|
||||||
@ -18,7 +22,7 @@ method newStream*(
|
|||||||
name: string = "",
|
name: string = "",
|
||||||
lazy: bool = false):
|
lazy: bool = false):
|
||||||
Future[Connection] {.async, gcsafe.} =
|
Future[Connection] {.async, gcsafe.} =
|
||||||
result = Connection.new(m.peerId, Direction.Out)
|
result = getConnection(m.peerId, Direction.Out)
|
||||||
|
|
||||||
suite "Connection Manager":
|
suite "Connection Manager":
|
||||||
teardown:
|
teardown:
|
||||||
@ -27,7 +31,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "add and retrieve a connection":
|
asyncTest "add and retrieve a connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
|
|
||||||
connMngr.storeConn(conn)
|
connMngr.storeConn(conn)
|
||||||
check conn in connMngr
|
check conn in connMngr
|
||||||
@ -41,7 +45,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "shouldn't allow a closed connection":
|
asyncTest "shouldn't allow a closed connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
await conn.close()
|
await conn.close()
|
||||||
|
|
||||||
expect CatchableError:
|
expect CatchableError:
|
||||||
@ -52,7 +56,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "shouldn't allow an EOFed connection":
|
asyncTest "shouldn't allow an EOFed connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
conn.isEof = true
|
conn.isEof = true
|
||||||
|
|
||||||
expect CatchableError:
|
expect CatchableError:
|
||||||
@ -64,7 +68,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "add and retrieve a muxer":
|
asyncTest "add and retrieve a muxer":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
let muxer = new Muxer
|
let muxer = new Muxer
|
||||||
muxer.connection = conn
|
muxer.connection = conn
|
||||||
|
|
||||||
@ -80,7 +84,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "shouldn't allow a muxer for an untracked connection":
|
asyncTest "shouldn't allow a muxer for an untracked connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
let muxer = new Muxer
|
let muxer = new Muxer
|
||||||
muxer.connection = conn
|
muxer.connection = conn
|
||||||
|
|
||||||
@ -94,8 +98,8 @@ suite "Connection Manager":
|
|||||||
asyncTest "get conn with direction":
|
asyncTest "get conn with direction":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn1 = Connection.new(peerId, Direction.Out)
|
let conn1 = getConnection(peerId, Direction.Out)
|
||||||
let conn2 = Connection.new(peerId, Direction.In)
|
let conn2 = getConnection(peerId)
|
||||||
|
|
||||||
connMngr.storeConn(conn1)
|
connMngr.storeConn(conn1)
|
||||||
connMngr.storeConn(conn2)
|
connMngr.storeConn(conn2)
|
||||||
@ -114,7 +118,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "get muxed stream for peer":
|
asyncTest "get muxed stream for peer":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
|
|
||||||
let muxer = new TestMuxer
|
let muxer = new TestMuxer
|
||||||
muxer.peerId = peerId
|
muxer.peerId = peerId
|
||||||
@ -134,7 +138,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "get stream from directed connection":
|
asyncTest "get stream from directed connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
|
|
||||||
let muxer = new TestMuxer
|
let muxer = new TestMuxer
|
||||||
muxer.peerId = peerId
|
muxer.peerId = peerId
|
||||||
@ -155,7 +159,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "get stream from any connection":
|
asyncTest "get stream from any connection":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
|
|
||||||
let muxer = new TestMuxer
|
let muxer = new TestMuxer
|
||||||
muxer.peerId = peerId
|
muxer.peerId = peerId
|
||||||
@ -175,11 +179,11 @@ suite "Connection Manager":
|
|||||||
let connMngr = ConnManager.new(maxConnsPerPeer = 1)
|
let connMngr = ConnManager.new(maxConnsPerPeer = 1)
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
|
|
||||||
connMngr.storeConn(Connection.new(peerId, Direction.In))
|
connMngr.storeConn(getConnection(peerId))
|
||||||
|
|
||||||
let conns = @[
|
let conns = @[
|
||||||
Connection.new(peerId, Direction.In),
|
getConnection(peerId),
|
||||||
Connection.new(peerId, Direction.In)]
|
getConnection(peerId)]
|
||||||
|
|
||||||
expect TooManyConnectionsError:
|
expect TooManyConnectionsError:
|
||||||
connMngr.storeConn(conns[0])
|
connMngr.storeConn(conns[0])
|
||||||
@ -193,7 +197,7 @@ suite "Connection Manager":
|
|||||||
asyncTest "cleanup on connection close":
|
asyncTest "cleanup on connection close":
|
||||||
let connMngr = ConnManager.new()
|
let connMngr = ConnManager.new()
|
||||||
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
let peerId = PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet()
|
||||||
let conn = Connection.new(peerId, Direction.In)
|
let conn = getConnection(peerId)
|
||||||
let muxer = new Muxer
|
let muxer = new Muxer
|
||||||
|
|
||||||
muxer.connection = conn
|
muxer.connection = conn
|
||||||
@ -220,7 +224,7 @@ suite "Connection Manager":
|
|||||||
Direction.In else:
|
Direction.In else:
|
||||||
Direction.Out
|
Direction.Out
|
||||||
|
|
||||||
let conn = Connection.new(peerId, dir)
|
let conn = getConnection(peerId, dir)
|
||||||
let muxer = new Muxer
|
let muxer = new Muxer
|
||||||
muxer.connection = conn
|
muxer.connection = conn
|
||||||
|
|
||||||
@ -353,7 +357,7 @@ suite "Connection Manager":
|
|||||||
let slot = await ((connMngr.getOutgoingSlot()).wait(10.millis))
|
let slot = await ((connMngr.getOutgoingSlot()).wait(10.millis))
|
||||||
|
|
||||||
let conn =
|
let conn =
|
||||||
Connection.new(
|
getConnection(
|
||||||
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
|
||||||
Direction.In)
|
Direction.In)
|
||||||
|
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user