Merge remote-tracking branch 'origin/unstable' into autorelay

Ludovic Chenut 2022-12-20 14:43:45 +01:00
commit eaa55abe48
53 changed files with 1630 additions and 585 deletions

.github/actions/install_nim/action.yml vendored Normal file

@ -0,0 +1,131 @@
name: Install Nim
inputs:
os:
description: "Operating system to build for"
required: true
cpu:
description: "CPU to build for"
default: "amd64"
nim_branch:
description: "Nim version"
default: "version-1-6"
shell:
description: "Shell to run commands in"
default: "bash --noprofile --norc -e -o pipefail"
runs:
using: "composite"
steps:
- name: Install build dependencies (Linux i386)
shell: ${{ inputs.shell }}
if: inputs.os == 'Linux' && inputs.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: inputs.os == 'Windows' && inputs.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: inputs.os == 'Windows' && inputs.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: inputs.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v3
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
shell: ${{ inputs.shell }}
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
inputs.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
shell: ${{ inputs.shell }}
if: >
inputs.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
shell: ${{ inputs.shell }}
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ inputs.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Restore Nim from cache
id: nim-cache
uses: actions/cache@v3
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
run: |
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries


@ -7,6 +7,10 @@ on:
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
build:
timeout-minutes: 90
@ -45,111 +49,20 @@ jobs:
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Build Nim and Nimble
run: |
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
os: ${{ matrix.target.os }}
cpu: ${{ matrix.target.cpu }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
- name: Setup Go
uses: actions/setup-go@v2
@ -160,9 +73,20 @@ jobs:
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Run tests
run: |
nim --version
nimble --version
nimble install_pinned
nimble test


@ -5,136 +5,61 @@ on:
push:
branches:
- master
- unstable
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
GossipSub:
Coverage:
runs-on: ubuntu-20.04
strategy:
matrix:
nim-options: [
"",
"-d:libp2p_pubsub_anonymize=true -d:libp2p_pubsub_sign=false -d:libp2p_pubsub_verify=false",
"-d:libp2p_pubsub_sign=true -d:libp2p_pubsub_verify=true"
]
test-program: [
"tests/pubsub/testpubsub",
"tests/pubsub/testfloodsub",
"tests/pubsub/testgossipinternal"
]
env:
CICOV: YES
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: linux
cpu: amd64
shell: bash
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Run
run: |
sudo apt-get update
sudo apt-get install -y lcov build-essential git curl
mkdir coverage
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
export PATH="$PATH:$PWD/Nim/bin"
nimble install_pinned
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
cd nimcache; rm *.c; cd ..
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
nimble testnative
nimble testpubsub
nimble testfilter
find nimcache -name *.c -delete
lcov --capture --directory nimcache --output-file coverage/coverage.info
shopt -s globstar
ls `pwd`/libp2p/{*,**/*}.nim
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
genhtml coverage/coverage.f.info --output-directory coverage/output
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
- uses: actions/upload-artifact@master
with:
name: coverage
path: coverage
Tests:
runs-on: ubuntu-20.04
strategy:
matrix:
nim-options: [
""
]
test-program: [
"tests/testnative",
]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Run
run: |
sudo apt-get update
sudo apt-get install -y lcov build-essential git curl
mkdir coverage
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
export PATH="$PATH:$PWD/Nim/bin"
nimble install_pinned
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }} --clearNimblePath --NimblePath:nimbledeps/pkgs"
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
cd nimcache; rm *.c; cd ..
lcov --capture --directory nimcache --output-file coverage/coverage.info
shopt -s globstar
ls `pwd`/libp2p/{*,**/*}.nim
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
- uses: actions/upload-artifact@master
with:
name: coverage
path: coverage
Filter:
runs-on: ubuntu-20.04
strategy:
matrix:
nim-options: [
"",
"-d:libp2p_pki_schemes=secp256k1",
"-d:libp2p_pki_schemes=secp256k1;ed25519",
"-d:libp2p_pki_schemes=secp256k1;ed25519;ecnist",
]
test-program: [
"tests/testpkifilter",
]
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Run
run: |
sudo apt-get update
sudo apt-get install -y lcov build-essential git curl
mkdir coverage
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="make -j${NPROC}" bash build_nim.sh Nim csources dist/nimble NimBinaries
export PATH="$PATH:$PWD/Nim/bin"
nimble install_pinned
export NIM_OPTIONS="--opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --warning[CaseTransition]:off --warning[ObservableStores]:off --warning[LockLevel]:off --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage ${{ matrix.nim-options }}"
nim c $NIM_OPTIONS -r ${{ matrix.test-program }}
cd nimcache; rm *.c; cd ..
lcov --capture --directory nimcache --output-file coverage/coverage.info
shopt -s globstar
ls `pwd`/libp2p/{*,**/*}.nim
lcov --extract coverage/coverage.info `pwd`/libp2p/{*,**/*}.nim --output-file coverage/coverage.f.info
export COV_UUID=`cksum <<< "${{ matrix.test-program }} $NIM_OPTIONS" | cut -f 1 -d ' '`
genhtml coverage/coverage.f.info --output-directory coverage/$COV_UUID-output
echo ${{ matrix.test-program }} > coverage/$COV_UUID-nim_options.txt
echo $NIM_OPTIONS >> coverage/$COV_UUID-nim_options.txt
bash <(curl -s https://codecov.io/bash) -f coverage/coverage.f.info || echo "Codecov did not collect coverage reports"
- uses: actions/upload-artifact@master
with:
name: coverage
path: coverage
#- uses: actions/upload-artifact@master
# with:
# name: coverage
# path: coverage


@ -5,7 +5,13 @@ on:
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
@ -42,112 +48,18 @@ jobs:
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
ref: unstable
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Build Nim and Nimble
run: |
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ matrix.branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v2
@ -163,9 +75,8 @@ jobs:
nim --version
nimble --version
nimble install -y --depsOnly
nimble test
if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
export NIMFLAGS="${NIMFLAGS} --gc:orc"
nimble test
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi

.pinned

@ -1,16 +1,16 @@
bearssl;https://github.com/status-im/nim-bearssl@#f4c4233de453cb7eac0ce3f3ffad6496295f83ab
bearssl;https://github.com/status-im/nim-bearssl@#a647994910904b0103a05db3a5ec1ecfc4d91a88
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#266e2c0ed26b455872bccb3ddbd316815a283659
chronos;https://github.com/status-im/nim-chronos@#75d030ff71264513fb9701c75a326cd36fcb4692
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#6112432b3a81d9db116cd5d64c39648881cfff29
httputils;https://github.com/status-im/nim-http-utils@#e88e231dfcef4585fe3b2fbd9b664dbd28a88040
json_serialization;https://github.com/status-im/nim-json-serialization@#e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4
metrics;https://github.com/status-im/nim-metrics@#0a6477268e850d7bc98347b3875301524871765f
faststreams;https://github.com/status-im/nim-faststreams@#b42daf41d8eb4fbce40add6836bed838f8d85b6f
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
secp256k1;https://github.com/status-im/nim-secp256k1@#c7f1a37d9b0f17292649bfed8bf6cef83cf4221f
serialization;https://github.com/status-im/nim-serialization@#493d18b8292fc03aa4f835fd825dea1183f97466
stew;https://github.com/status-im/nim-stew@#23da07c9b59c0ba3d4efa7e4e6e2c4121ae5a156
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#d77417cba6896c26287a68e6a95762e45a1b87e5
stew;https://github.com/status-im/nim-stew@#7184d2424dc3945657884646a72715d494917aad
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#f180f596c88dfd266f746ed6f8dbebce39c824db
websock;https://github.com/status-im/nim-websock@#acbe30e9ca1e51dcbbfe4c552ee6f16c7eede538
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#691f069b209d372b1240d5ae1f57fb7bbafeaba7
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8


@ -2,7 +2,7 @@
<a href="https://libp2p.io"><img width="250" src="./.assets/full-logo.svg?raw=true" alt="nim-libp2p logo" /></a>
</h1>
<h3 align="center">The Nim implementation of the libp2p Networking Stack.</h3>
<h3 align="center">The <a href="https://nim-lang.org/">Nim</a> implementation of the <a href="https://libp2p.io/">libp2p</a> Networking Stack.</h3>
<p align="center">
<a href="https://github.com/status-im/nim-libp2p/actions"><img src="https://github.com/status-im/nim-libp2p/actions/workflows/ci.yml/badge.svg" /></a>
@ -16,30 +16,26 @@
<img src="https://img.shields.io/badge/nim-%3E%3D1.2.0-orange.svg?style=flat-square" />
</p>
## Introduction
An implementation of [libp2p](https://libp2p.io/) in [Nim](https://nim-lang.org/).
# Table of Contents
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Core Developers](#core-developers)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [License](#license)
## Background
libp2p is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications](https://github.com/libp2p/specs).
libp2p is the product of a long and arduous quest of understanding; a deep dive into the internet's network stack and the peer-to-peer protocols from the past. Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It is a "network stack", a suite of networking protocols that cleanly separates concerns and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability.
Building large-scale peer-to-peer systems has been complex and difficult over the last 15 years, and libp2p is a way to fix that. It strives to be a modular stack with sane and secure defaults and useful protocols, while remaining open and extensible.
This implementation is written in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users).
libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.
- Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow our evolving documentation efforts at [**docs.libp2p.io**](https://docs.libp2p.io).
- [Here](https://github.com/libp2p/libp2p#description) is an overview of libp2p and its implementations in other programming languages.
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation at [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
**Prerequisite**
@ -49,7 +45,7 @@ nimble install libp2p
```
## Getting Started
You'll find the documentation [here](https://status-im.github.io/nim-libp2p/docs/).
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
@ -63,25 +59,28 @@ List of packages modules implemented in nim-libp2p:
| **Libp2p** | |
| [libp2p](libp2p/switch.nim) | The core of the project |
| [connmanager](libp2p/connmanager.nim) | Connection manager |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/protocols/#ping) protocol |
| [identify / push identify](libp2p/protocols/identify.nim) | [Identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol |
| [ping](libp2p/protocols/ping.nim) | [Ping](https://docs.libp2p.io/concepts/fundamentals/protocols/#ping) protocol |
| [libp2p-daemon-client](libp2p/daemon/daemonapi.nim) | [go-daemon](https://github.com/libp2p/go-libp2p-daemon) nim wrapper |
| [interop-libp2p](tests/testinterop.nim) | Interop tests |
| **Transports** | |
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | [Secio](https://docs.libp2p.io/concepts/protocols/#secio) secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://github.com/libp2p/specs/tree/master/noise) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | [Plain Text](https://github.com/libp2p/specs/tree/master/plaintext) for development purposes |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
| [libp2p-mplex](libp2p/muxers/mplex/mplex.nim) | [MPlex](https://github.com/libp2p/specs/tree/master/mplex) multiplexer |
| [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
| **Data Types** | |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/peer-id/) |
| [peer-store](libp2p/peerstore.nim) | ["Phone book" of known peers](https://docs.libp2p.io/concepts/peer-id/#peerinfo) |
| [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
| [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
| [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
| **Utilities** | |
| [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
| [libp2p-crypto-secp256k1](libp2p/crypto/secp.nim) | |
@ -113,7 +112,10 @@ Clone and Install dependencies:
```sh
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependency versions used in CI
nimble install_pinned
```
Run unit tests:
@ -133,8 +135,19 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Core Developers
[@cheatfate](https://github.com/cheatfate), [Dmitriy Ryajov](https://github.com/dryajov), [Tanguy](https://github.com/Menduist), [Zahary Karadjov](https://github.com/zah)
### Contributors
<a href="https://github.com/status-im/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=status-im/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags


@ -1,14 +1,8 @@
codecov:
notify:
require_ci_to_pass: true
# must be the number of coverage report builds
# notice that this number is for PRs;
# like this we disabled notify on pure branches report
# which is fine I guess
after_n_builds: 28
comment:
layout: "reach, diff, flags, files"
after_n_builds: 28 # must be the number of coverage report builds
coverage:
status:
project:
@ -16,4 +10,4 @@ coverage:
# basic settings
target: auto
threshold: 5%
base: auto
base: auto


@ -1,6 +1,5 @@
# nim-libp2p documentation
# nim-libp2p examples
Welcome to the nim-libp2p documentation!
In this folder, you'll find the sources of the [nim-libp2p website](https://status-im.github.io/nim-libp2p/docs/)
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).
We recommend following the tutorials on the website, but feel free to grok the sources here!

examples/index.md Normal file

@ -0,0 +1,6 @@
# nim-libp2p documentation
Welcome to the nim-libp2p documentation!
Here, you'll find [tutorials](tutorial_1_connect.md) to help you get started, as well as
the [full reference](https://status-im.github.io/nim-libp2p/master/libp2p.html).


@ -32,7 +32,7 @@ proc new(T: typedesc[TestProto]): T =
# We must close the connections ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
return T.new(codecs = @[TestCodec], handler = handle)
## This is a constructor for our `TestProto`, that will specify our `codecs` and a `handler`, which will be called for each incoming peer asking for this protocol.
## In our handle, we simply read a message from the connection and `echo` it.


@ -107,7 +107,7 @@ type
metricGetter: MetricCallback
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
let res = MetricProto(metricGetter: cb)
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
let
metrics = await res.metricGetter()
@ -115,8 +115,8 @@ proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
await conn.writeLp(asProtobuf.buffer)
await conn.close()
res.codecs = @["/metric-getter/1.0.0"]
res.handler = handle
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb
return res
proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =


@ -36,7 +36,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T(codecs: @[DumbCodec], handler: handle)
return T.new(codecs = @[DumbCodec], handler = handle)
## ## Bootnodes
## The first time a p2p program is run, it needs to know how to join


@ -157,7 +157,7 @@ proc new(T: typedesc[GameProto], g: Game): T =
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
return T(codecs: @["/tron/1.0.0"], handler: handle)
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
proc networking(g: Game) {.async.} =
# Create our switch, similar to the GossipSub example and


@ -16,8 +16,10 @@ requires "nim >= 1.2.0",
"metrics",
"secp256k1",
"stew#head",
"websock"
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
@ -28,6 +30,8 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r " & " tests/" & filename
rmFile "tests/" & filename.toExe
@ -108,6 +112,7 @@ task examples_build, "Build the samples":
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
# Nico doesn't work in 1.2
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
@ -134,9 +139,13 @@ task install_pinned, "Reads the lockfile":
# Remove the automatically installed deps
# (inefficient you say?)
let allowedDirectories = toInstall.mapIt(it[0] & "-" & it[1].split('@')[1])
for dependency in listDirs("nimbledeps/pkgs"):
if dependency.extractFilename notin allowedDirectories:
let nimblePkgs =
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for dependency in listDirs(nimblePkgs):
let filename = dependency.extractFilename
if toInstall.anyIt(filename.startsWith(it[0]) and
filename.endsWith(it[1].split('#')[^1])) == false:
rmDir(dependency)
task unpin, "Restore global package use":


@ -111,7 +111,7 @@ proc connCount*(c: ConnManager, peerId: PeerId): int =
c.conns.getOrDefault(peerId).len
proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
var peers = newSeq[PeerId]();
var peers = newSeq[PeerId]()
for peerId, conns in c.conns:
if conns.anyIt(it.dir == dir):
peers.add(peerId)
@ -544,3 +544,4 @@ proc close*(c: ConnManager) {.async.} =
await conn.close()
trace "Closed ConnManager"


@ -492,7 +492,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
res = PB.getUVarint(buffer.toOpenArray(0, i), length, size)
if res.isOk():
break
if res.isErr() or size > MaxMessageSize:
if res.isErr() or size > 1'u shl 22:
buffer.setLen(0)
result = buffer
return


@ -36,7 +36,8 @@ method connect*(
method connect*(
self: Dial,
addrs: seq[MultiAddress]): Future[PeerId] {.async, base.} =
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async, base.} =
## Connects to a peer and retrieves its PeerId
doAssert(false, "Not implemented!")


@ -219,11 +219,23 @@ method connect*(
method connect*(
self: Dialer,
addrs: seq[MultiAddress],
): Future[PeerId] {.async.} =
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] {.async.} =
## Connects to a peer and retrieves its PeerId
return (await self.internalConnect(Opt.none(PeerId), addrs, false)).peerId
let fullAddress = parseFullAddress(address)
if fullAddress.isOk:
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).peerId
proc negotiateStream(
self: Dialer,


@ -470,6 +470,8 @@ const
WS* = mapAnd(TCP, mapEq("ws"))
WSS* = mapAnd(TCP, mapEq("wss"))
WebSockets* = mapOr(WS, WSS)
Onion3* = mapEq("onion3")
TcpOnion3* = mapAnd(TCP, Onion3)
Unreliable* = mapOr(UDP)


@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[strutils, sequtils]
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
import stream/connection,
protocols/protocol
@ -21,7 +21,7 @@ logScope:
topics = "libp2p multistream"
const
MsgSize* = 64*1024
MsgSize* = 1024
Codec* = "/multistream/1.0.0"
MSCodec* = "\x13" & Codec & "\n"
@ -33,17 +33,20 @@ type
MultiStreamError* = object of LPError
HandlerHolder* = object
HandlerHolder* = ref object
protos*: seq[string]
protocol*: LPProtocol
match*: Matcher
openedStreams: CountTable[PeerId]
MultistreamSelect* = ref object of RootObj
handlers*: seq[HandlerHolder]
codec*: string
proc new*(T: typedesc[MultistreamSelect]): T =
T(codec: MSCodec)
T(
codec: MSCodec,
)
template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
@ -169,9 +172,22 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
await conn.writeLp(ms & "\n")
conn.protocol = ms
await h.protocol.handler(conn, ms)
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await conn.writeLp(ms & "\n")
conn.protocol = ms
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, protocol = ms
await conn.write(Na)


@ -414,7 +414,8 @@ method close*(m: Yamux) {.async.} =
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(true)
await m.connection.write(YamuxHeader.goAway(NormalTermination))
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
await m.connection.close()
trace "Closed yamux"


@ -184,6 +184,11 @@ func init*(t: typedesc[PeerId], seckey: PrivateKey): Result[PeerId, cstring] =
## Create new peer id from private key ``seckey``.
PeerId.init(? seckey.getPublicKey().orError(cstring("invalid private key")))
proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
## Create new peer id with random public key.
let randomKey = PrivateKey.random(Secp256k1, rng[])[]
PeerId.init(randomKey).orError(cstring("failed to generate random key"))
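
For reference, a tiny usage sketch of the new `PeerId.random` helper; sharing one application-wide `newRng()` instance is an assumption, not a requirement:

```nim
import libp2p

let rng = newRng()                     # HmacDrbgContext reused by the caller
let pid = PeerId.random(rng).get()     # Result[PeerId, cstring]
echo "generated random peer id: ", pid
```
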
func match*(pid: PeerId, pubkey: PublicKey): bool =
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
let p = PeerId.init(pubkey)


@ -16,8 +16,7 @@ runnableExamples:
# Create a custom book type
type MoodBook = ref object of PeerBook[string]
var somePeerId: PeerId
discard somePeerId.init("")
var somePeerId = PeerId.random().get()
peerStore[MoodBook][somePeerId] = "Happy"
doAssert peerStore[MoodBook][somePeerId] == "Happy"
@ -153,6 +152,9 @@ proc updatePeerInfo*(
if info.addrs.len > 0:
peerStore[AddressBook][info.peerId] = info.addrs
if info.pubkey.isSome:
peerStore[KeyBook][info.peerId] = info.pubkey.get()
if info.agentVersion.isSome:
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string


@ -19,8 +19,7 @@ export results, utility
{.push public.}
const
MaxMessageSize* = 1'u shl 22
const MaxMessageSize = 1'u shl 22
type
ProtoFieldKind* = enum
@ -37,6 +36,7 @@ type
buffer*: seq[byte]
offset*: int
length*: int
maxSize*: uint
ProtoHeader* = object
wire*: ProtoFieldKind
@ -122,23 +122,28 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
0
proc initProtoBuffer*(data: seq[byte], offset = 0,
options: set[ProtoFlags] = {}): ProtoBuffer =
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize): ProtoBuffer =
## Initialize ProtoBuffer with shallow copy of ``data``.
result.buffer = data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(data: openArray[byte], offset = 0,
options: set[ProtoFlags] = {}): ProtoBuffer =
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
proc initProtoBuffer*(options: set[ProtoFlags] = {}, maxSize = MaxMessageSize): ProtoBuffer =
## Initialize ProtoBuffer with an empty sequence.
result.buffer = newSeq[byte]()
result.options = options
result.maxSize = maxSize
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
@ -335,7 +340,7 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
var bsize = 0'u64
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(MaxMessageSize):
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
data.offset += int(bsize)
ok()
@ -399,7 +404,7 @@ proc getValue[T:byte|char](data: var ProtoBuffer, header: ProtoHeader,
outLength = 0
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(MaxMessageSize):
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
@ -427,7 +432,7 @@ proc getValue[T:seq[byte]|string](data: var ProtoBuffer, header: ProtoHeader,
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(MaxMessageSize):
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outBytes.setLen(bsize)
if bsize > 0'u64:
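
For illustration, a minimal sketch of the new per-buffer `maxSize` knob; the field number and the 5 MiB payload are arbitrary assumptions, and the pattern mirrors what the pubsub RPC codec further down does with `maxSize = uint.high`:

```nim
import libp2p/protobuf/minprotobuf

# Encode a length-delimited field larger than the 4 MiB default (MaxMessageSize).
var writer = initProtoBuffer(maxSize = uint.high)
writer.write(1, newSeq[byte](5 * 1024 * 1024))
writer.finish()

# Decoding checks each embedded length against maxSize, so a reader built with
# the defaults would reject this field, while a relaxed one accepts it.
var reader = initProtoBuffer(writer.buffer, maxSize = uint.high)
var payload: seq[byte]
doAssert reader.getField(1, payload).get()
doAssert payload.len == 5 * 1024 * 1024
```
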


@ -32,6 +32,7 @@ const
type
AutonatError* = object of LPError
AutonatUnreachableError* = object of LPError
MsgType* = enum
Dial = 0
@ -203,25 +204,37 @@ type
sem: AsyncSemaphore
switch*: Switch
proc dialMe*(a: Autonat, pid: PeerId, ma: MultiAddress|seq[MultiAddress]):
Future[MultiAddress] {.async.} =
let addrs = when ma is MultiAddress: @[ma] else: ma
let conn = await a.switch.dial(pid, addrs, AutonatCodec)
method dialMe*(a: Autonat, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [UnpackError, AutonatError].} =
if autonatMsg.isNone() or
autonatMsg.get().msgType != DialResponse or
autonatMsg.get().response.isNone() or
autonatMsg.get().response.get().ma.isNone():
raise newException(AutonatError, "Unexpected response")
else:
autonatMsg.get().response.get()
let conn =
try:
if addrs.len == 0:
await a.switch.dial(pid, @[AutonatCodec])
else:
await a.switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
raise newException(AutonatError, "Unexpected error when dialling", err)
defer: await conn.close()
await conn.sendDial(a.switch.peerInfo.peerId, a.switch.peerInfo.addrs)
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
if msgOpt.isNone() or
msgOpt.get().msgType != DialResponse or
msgOpt.get().response.isNone():
raise newException(AutonatError, "Unexpected response")
let response = msgOpt.get().response.get()
if response.status != ResponseStatus.Ok:
raise newException(AutonatError, "Bad status " &
$response.status & " " &
response.text.get(""))
if response.ma.isNone():
raise newException(AutonatError, "Missing address")
return response.ma.get()
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
response.ma.get()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back")
else:
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))
proc tryDial(a: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
try:


@ -25,6 +25,7 @@ import ./relay,
../../../multiaddress,
../../../stream/connection
export options
logScope:
topics = "libp2p relay relay-client"


@ -12,9 +12,14 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import chronos
import chronos, stew/results
import ../stream/connection
export results
const
DefaultMaxIncomingStreams* = 10
type
LPProtoHandler* = proc (
conn: Connection,
@ -26,11 +31,17 @@ type
codecs*: seq[string]
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
started*: bool
maxIncomingStreams: Opt[int]
method init*(p: LPProtocol) {.base, gcsafe.} = discard
method start*(p: LPProtocol) {.async, base.} = p.started = true
method stop*(p: LPProtocol) {.async, base.} = p.started = false
proc maxIncomingStreams*(p: LPProtocol): int =
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
p.maxIncomingStreams = Opt.some(val)
func codec*(p: LPProtocol): string =
assert(p.codecs.len > 0, "Codecs sequence was empty!")
@ -40,3 +51,16 @@ func `codec=`*(p: LPProtocol, codec: string) =
# always insert as first codec
# if we use this abstraction
p.codecs.insert(codec, 0)
proc new*(
T: type LPProtocol,
codecs: seq[string],
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
T(
codecs: codecs,
handler: handler,
maxIncomingStreams:
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
else: maxIncomingStreams
)
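
For illustration, a short sketch of the new `LPProtocol.new` constructor together with the per-peer stream cap enforced by multistream; the codec string and the echo handler are assumptions:

```nim
import chronos
import stew/byteutils
import libp2p

proc exampleProto(): LPProtocol =
  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
    echo "received: ", string.fromBytes(await conn.readLp(1024))
    await conn.close()

  # Allow at most 2 concurrent incoming streams per peer for this protocol;
  # omitting maxIncomingStreams falls back to DefaultMaxIncomingStreams (10).
  LPProtocol.new(
    codecs = @["/example/1.0.0"],
    handler = handle,
    maxIncomingStreams = 2)
```
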


@ -130,7 +130,7 @@ type
knownTopics*: HashSet[string]
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base.} =
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
## handle peer disconnects
##
@ -377,7 +377,7 @@ method handleConn*(p: PubSub,
finally:
await conn.closeWithEOF()
method subscribePeer*(p: PubSub, peer: PeerId) {.base.} =
method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} =
## subscribe to remote peer to receive/send pubsub
## messages
##
@ -400,7 +400,7 @@ proc updateTopicMetrics(p: PubSub, topic: string) =
libp2p_pubsub_topic_handlers.set(others, labelValues = ["other"])
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base.} =
method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base, gcsafe.} =
# Called when subscribe is called the first time for a topic or unsubscribe
# removes the last handler
@ -433,7 +433,7 @@ proc unsubscribe*(p: PubSub, topics: openArray[TopicPair]) {.public.} =
for t in topics:
p.unsubscribe(t.topic, t.handler)
proc unsubscribeAll*(p: PubSub, topic: string) {.public.} =
proc unsubscribeAll*(p: PubSub, topic: string) {.public, gcsafe.} =
## unsubscribe every `handler` from `topic`
if topic notin p.topics:
debug "unsubscribeAll called for an unknown topic", topic
@ -495,7 +495,7 @@ method initPubSub*(p: PubSub)
method addValidator*(p: PubSub,
topic: varargs[string],
hook: ValidatorHandler) {.base, public.} =
hook: ValidatorHandler) {.base, public, gcsafe.} =
## Add a validator to a `topic`. Each new message received in this
## will be sent to `hook`. `hook` can return either `Accept`,
## `Ignore` or `Reject` (which can descore the peer)


@ -304,14 +304,15 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
if ? pb.getRepeatedField(2, msgpbs):
trace "decodeMessages: read messages", count = len(msgpbs)
for item in msgpbs:
msgs.add(? decodeMessage(initProtoBuffer(item)))
# size is constrained at the network level
msgs.add(? decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
else:
trace "decodeMessages: no messages found"
ok(msgs)
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
trace "encodeRpcMsg: encoding message", msg = msg.shortLog()
var pb = initProtoBuffer()
var pb = initProtoBuffer(maxSize = uint.high)
for item in msg.subscriptions:
pb.write(1, item)
for item in msg.messages:
@ -324,7 +325,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg)
var pb = initProtoBuffer(msg, maxSize = uint.high)
var rpcMsg = ok(RPCMsg())
assign(rpcMsg.get().messages, ? pb.decodeMessages())
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())


@ -0,0 +1,157 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, deques, sequtils]
import chronos, metrics
import ../switch
import ../protocols/[connectivity/autonat]
import ../utils/heartbeat
import ../crypto/crypto
declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability confidence", labels = ["reachability"])
type
AutonatService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
scheduleHandle: Future[void]
networkReachability: NetworkReachability
confidence: Option[float]
answers: Deque[NetworkReachability]
autonat: Autonat
statusAndConfidenceHandler: StatusAndConfidenceHandler
rng: ref HmacDrbgContext
scheduleInterval: Option[Duration]
askNewConnectedPeers: bool
numPeersToAsk: int
maxQueueSize: int
minConfidence: float
dialTimeout: Duration
NetworkReachability* {.pure.} = enum
NotReachable, Reachable, Unknown
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
proc new*(
T: typedesc[AutonatService],
autonat: Autonat,
rng: ref HmacDrbgContext,
scheduleInterval: Option[Duration] = none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
dialTimeout = 5.seconds): T =
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
confidence: none(float),
answers: initDeque[NetworkReachability](),
autonat: autonat,
rng: rng,
askNewConnectedPeers: askNewConnectedPeers,
numPeersToAsk: numPeersToAsk,
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
dialTimeout: dialTimeout)
proc networkReachability*(self: AutonatService): NetworkReachability {.inline.} =
return self.networkReachability
proc callHandler(self: AutonatService) {.async.} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
if self.answers.len == self.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
self.confidence = none(float)
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
if self.confidence.isNone and confidence >= self.minConfidence:
self.networkReachability = reachability
self.confidence = some(confidence)
trace "Current status", currentStats = $self.networkReachability, confidence = $self.confidence
proc askPeer(self: AutonatService, s: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
trace "Asking for reachability", peerId = $peerId
let ans =
try:
discard await self.autonat.dialMe(peerId).wait(self.dialTimeout)
Reachable
except AutonatUnreachableError:
trace "dialMe answer is not reachable", peerId = $peerId
NotReachable
except AsyncTimeoutError:
trace "dialMe timed out", peerId = $peerId
Unknown
except CatchableError as err:
trace "dialMe unexpected error", peerId = $peerId, errMsg = $err.msg
Unknown
await self.handleAnswer(ans)
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
var answersFromPeers = 0
for peer in peers:
if answersFromPeers >= self.numPeersToAsk:
break
elif (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
heartbeat "Schedule AutonatService run", interval:
await service.run(switch)
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
await self.callHandler()
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.scheduleInterval.isSome():
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
await askConnectedPeers(self, switch)
await self.callHandler()
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
if not isNil(self.scheduleHandle):
self.scheduleHandle.cancel()
self.scheduleHandle = nil
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
return hasBeenStopped
proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
self.statusAndConfidenceHandler = statusAndConfidenceHandler
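
To make the new service concrete, a minimal wiring sketch. The `libp2p/services/autonatservice` module path, the `Autonat.new(switch)` call, and appending to `switch.services` before `start()` are assumptions inferred from the code above, not something this diff prescribes:

```nim
import std/options
import chronos, chronicles
import libp2p
import libp2p/protocols/connectivity/autonat
import libp2p/services/autonatservice   # assumed module path

proc onStatus(networkReachability: NetworkReachability,
              confidence: Option[float]) {.gcsafe, async.} =
  # Invoked every time the reachability estimate is refreshed.
  info "autonat status", reachability = $networkReachability,
                         confidence = $confidence

proc main() {.async.} =
  let
    rng = newRng()
    switch = newStandardSwitch(rng = rng)
    service = AutonatService.new(
      Autonat.new(switch), rng,
      scheduleInterval = some(5.minutes))

  service.statusAndConfidenceHandler(onStatus)
  switch.services.add(service)   # switch.start() calls setup() on every service
  await switch.start()

waitFor main()
```
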


@ -80,7 +80,7 @@ type
opened*: uint64
closed*: uint64
proc setupStreamTracker(name: string): StreamTracker =
proc setupStreamTracker*(name: string): StreamTracker =
let tracker = new StreamTracker
proc dumpTracking(): string {.gcsafe.} =


@ -77,19 +77,20 @@ type
services*: seq[Service]
Service* = ref object of RootObj
inUse*: bool
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe, public.} =
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async, gcsafe, public.} =
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
doAssert(false, "Not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe, public.} =
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
if not self.inUse:
warn "service is already stopped"
return false
@ -154,10 +155,15 @@ method connect*(
method connect*(
s: Switch,
addrs: seq[MultiAddress]): Future[PeerId] =
address: MultiAddress,
allowUnknownPeerId = false): Future[PeerId] =
## Connects to a peer and retrieves its PeerId
##
## If the P2P part is missing from the MA and `allowUnknownPeerId` is set
## to true, this will discover the PeerId while connecting. This exposes
## you to MiTM attacks, so it shouldn't be used without care!
s.dialer.connect(addrs)
s.dialer.connect(address, allowUnknownPeerId)
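
As an aside, a minimal sketch of the new single-address overload under the rules spelled out in the doc comment; the test-style wiring with `newStandardSwitch` instances is an assumption:

```nim
import chronos
import libp2p

proc connectDemo() {.async.} =
  let
    src = newStandardSwitch()
    dstA = newStandardSwitch()
    dstB = newStandardSwitch()
  await src.start()
  await dstA.start()
  await dstB.start()

  # Address that already carries a /p2p/ part: the PeerId is parsed out of it.
  let fullAddr = MultiAddress.init(
    $dstA.peerInfo.addrs[0] & "/p2p/" & $dstA.peerInfo.peerId).tryGet()
  doAssert (await src.connect(fullAddr)) == dstA.peerInfo.peerId

  # Bare address: only accepted with allowUnknownPeerId = true, and the returned
  # PeerId is whatever the remote presents (hence the MITM caveat above).
  doAssert (await src.connect(dstB.peerInfo.addrs[0],
                              allowUnknownPeerId = true)) == dstB.peerInfo.peerId

  await allFutures(src.stop(), dstA.stop(), dstB.stop())

waitFor connectDemo()
```
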
method dial*(
s: Switch,
@ -317,6 +323,9 @@ proc stop*(s: Switch) {.async, public.} =
if not a.finished:
a.cancel()
for service in s.services:
discard await service.stop(s)
await s.ms.stop()
trace "Switch stopped"
@ -324,6 +333,10 @@ proc stop*(s: Switch) {.async, public.} =
proc start*(s: Switch) {.async, gcsafe, public.} =
## Start listening on every transport
if s.started:
warn "Switch has already been started"
return
trace "starting switch for peer", peerInfo = s.peerInfo
var startFuts: seq[Future[void]]
for t in s.transports:
@ -356,7 +369,6 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
for service in s.services:
discard await service.setup(s)
# discard await service.run(s)
s.started = true


@ -0,0 +1,281 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## Tor transport implementation
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/strformat
import chronos, chronicles, strutils
import stew/[byteutils, endians2, results, objects]
import ../multicodec
import transport,
tcptransport,
../switch,
../builders,
../stream/[lpstream, connection, chronosstream],
../multiaddress,
../upgrademngrs/upgrade
const
IPTcp = mapAnd(IP, mapEq("tcp"))
IPv4Tcp = mapAnd(IP4, mapEq("tcp"))
IPv6Tcp = mapAnd(IP6, mapEq("tcp"))
DnsTcp = mapAnd(DNSANY, mapEq("tcp"))
Socks5ProtocolVersion = byte(5)
NMethods = byte(1)
type
TorTransport* = ref object of Transport
transportAddress: TransportAddress
tcpTransport: TcpTransport
Socks5AuthMethod* {.pure.} = enum
NoAuth = 0
GSSAPI = 1
UsernamePassword = 2
NoAcceptableMethod = 0xff
Socks5RequestCommand* {.pure.} = enum
Connect = 1, Bind = 2, UdpAssoc = 3
Socks5AddressType* {.pure.} = enum
IPv4 = 1, FQDN = 3, IPv6 = 4
Socks5ReplyType* {.pure.} = enum
Succeeded = (0, "Succeeded"), ServerFailure = (1, "Server Failure"),
ConnectionNotAllowed = (2, "Connection Not Allowed"), NetworkUnreachable = (3, "Network Unreachable"),
HostUnreachable = (4, "Host Unreachable"), ConnectionRefused = (5, "Connection Refused"),
TtlExpired = (6, "Ttl Expired"), CommandNotSupported = (7, "Command Not Supported"),
AddressTypeNotSupported = (8, "Address Type Not Supported")
TransportStartError* = object of transport.TransportError
Socks5Error* = object of CatchableError
Socks5AuthFailedError* = object of Socks5Error
Socks5VersionError* = object of Socks5Error
Socks5ServerReplyError* = object of Socks5Error
proc new*(
T: typedesc[TorTransport],
transportAddress: TransportAddress,
flags: set[ServerFlags] = {},
upgrade: Upgrade): T {.public.} =
## Creates a Tor transport
T(
transportAddress: transportAddress,
upgrader: upgrade,
tcpTransport: TcpTransport.new(flags, upgrade))
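As a usage sketch, assuming a Tor daemon exposing its SOCKS5 listener on the conventional 127.0.0.1:9050 (the flag set mirrors the tests added later in this commit):

let torServer = initTAddress("127.0.0.1", 9050.Port)
let transport = TorTransport.new(torServer, {ReuseAddr}, Upgrade())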
proc handlesDial(address: MultiAddress): bool {.gcsafe.} =
return Onion3.match(address) or TCP.match(address) or DNSANY.match(address)
proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
let transp = await connect(transportAddress)
try:
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
let
serverReply = await transp.read(2)
socks5ProtocolVersion = serverReply[0]
serverSelectedMethod = serverReply[1]
if socks5ProtocolVersion != Socks5ProtocolVersion:
raise newException(Socks5VersionError, "Unsupported socks version")
if serverSelectedMethod != Socks5AuthMethod.NoAuth.byte:
raise newException(Socks5AuthFailedError, "Unsupported auth method")
return transp
except CatchableError as err:
await transp.closeWait()
raise err
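Spelled out, the greeting exchanged above uses the constants defined in this file; the server's answer matches what the stub server in this commit's tests sends back:

# client -> server: VER=0x05, NMETHODS=0x01, METHODS=[0x00 (NoAuth)]
let clientGreeting = @[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte]  # = @[0x05'u8, 0x01, 0x00]
# server -> client: VER=0x05, METHOD=0x00 (NoAuth accepted)
let serverChoice = @[0x05'u8, 0x00]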
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
## The request/reply format implemented here is specified in
## [RFC 1928, section 5 (Addressing)](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [section 6 (Replies)](https://www.rfc-editor.org/rfc/rfc1928#section-6).
let
portNumOctets = 2
ipV4NumOctets = 4
ipV6NumOctets = 16
firstFourOctets = await transp.read(4)
socks5ProtocolVersion = firstFourOctets[0]
serverReply = firstFourOctets[1]
if socks5ProtocolVersion != Socks5ProtocolVersion:
raise newException(Socks5VersionError, "Unsupported socks version")
if serverReply != Socks5ReplyType.Succeeded.byte:
var socks5ReplyType: Socks5ReplyType
if socks5ReplyType.checkedEnumAssign(serverReply):
raise newException(Socks5ServerReplyError, fmt"Server reply error: {socks5ReplyType}")
else:
raise newException(LPError, fmt"Unexpected server reply: {serverReply}")
let atyp = firstFourOctets[3]
case atyp:
of Socks5AddressType.IPv4.byte:
discard await transp.read(ipV4NumOctets + portNumOctets)
of Socks5AddressType.FQDN.byte:
let fqdnNumOctets = await transp.read(1)
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
of Socks5AddressType.IPv6.byte:
discard await transp.read(ipV6NumOctets + portNumOctets)
else:
raise newException(LPError, "Address not supported")
proc parseOnion3(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
var addressArray = ($address).split('/')
if addressArray.len < 3: raise newException(LPError, fmt"Onion address not supported {address}")
addressArray = addressArray[2].split(':')
if addressArray.len == 0: raise newException(LPError, fmt"Onion address not supported {address}")
let
addressStr = addressArray[0] & ".onion"
dstAddr = @(uint8(addressStr.len).toBytes()) & addressStr.toBytes()
dstPort = address.data.buffer[37..38]
return (Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc parseIpTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) {.raises: [Defect, LPError, ValueError].} =
let (codec, atyp) =
if IPv4Tcp.match(address):
(multiCodec("ip4"), Socks5AddressType.IPv4.byte)
elif IPv6Tcp.match(address):
(multiCodec("ip6"), Socks5AddressType.IPv6.byte)
else:
raise newException(LPError, fmt"IP address not supported {address}")
let
dstAddr = address[codec].get().protoArgument().get()
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
(atyp, dstAddr, dstPort)
proc parseDnsTcp(address: MultiAddress): (byte, seq[byte], seq[byte]) =
let
dnsAddress = address[multiCodec("dns")].get().protoArgument().get()
dstAddr = @(uint8(dnsAddress.len).toBytes()) & dnsAddress
dstPort = address[multiCodec("tcp")].get().protoArgument().get()
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
elif IPTcp.match(address):
parseIpTcp(address)
elif DnsTcp.match(address):
parseDnsTcp(address)
else:
raise newException(LPError, fmt"Address not supported: {address}")
let reserved = byte(0)
let request = @[
Socks5ProtocolVersion,
Socks5RequestCommand.Connect.byte,
reserved,
atyp] & dstAddr & dstPort
discard await transp.write(request)
await readServerReply(transp)
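As a worked example with illustrative values, the request dialPeer builds for an IPv4 target 127.0.0.1:8080 would be:

let exampleRequest = @[
  0x05'u8,       # Socks5ProtocolVersion
  0x01,          # Socks5RequestCommand.Connect
  0x00,          # reserved
  0x01,          # Socks5AddressType.IPv4
  127, 0, 0, 1,  # DST.ADDR
  0x1f, 0x90]    # DST.PORT = 8080, big endian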
method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
## dial a peer
##
if not handlesDial(address):
raise newException(LPError, fmt"Address not supported: {address}")
trace "Dialing remote peer", address = $address
let transp = await connectToTorServer(self.transportAddress)
try:
await dialPeer(transp, address)
return await self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
except CatchableError as err:
await transp.closeWait()
raise err
method start*(
self: TorTransport,
addrs: seq[MultiAddress]) {.async.} =
## listen on the transport
##
var listenAddrs: seq[MultiAddress]
var onion3Addrs: seq[MultiAddress]
for i, ma in addrs:
if not handlesStart(ma):
warn "Invalid address detected, skipping!", address = ma
continue
let listenAddress = ma[0..1].get()
listenAddrs.add(listenAddress)
let onion3 = ma[multiCodec("onion3")].get()
onion3Addrs.add(onion3)
if len(listenAddrs) != 0 and len(onion3Addrs) != 0:
await procCall Transport(self).start(onion3Addrs)
await self.tcpTransport.start(listenAddrs)
else:
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
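A hedged start-up sketch, reusing the placeholder onion3 address from this commit's tests (a real deployment would advertise its own onion service address and matching local TCP listener):

let listenMa = MultiAddress.init(
  "/ip4/127.0.0.1/tcp/8080/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad:80").tryGet()
await transport.start(@[listenMa])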
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
## stop the transport
##
await procCall Transport(self).stop() # call base
await self.tcpTransport.stop()
method handles*(t: TorTransport, address: MultiAddress): bool {.gcsafe.} =
if procCall Transport(t).handles(address):
return handlesDial(address) or handlesStart(address)
type
TorSwitch* = ref object of Switch
proc new*(
T: typedesc[TorSwitch],
torServer: TransportAddress,
rng: ref HmacDrbgContext,
addresses: seq[MultiAddress] = @[],
flags: set[ServerFlags] = {}): TorSwitch
{.raises: [LPError, Defect], public.} =
var builder = SwitchBuilder.new()
.withRng(rng)
.withTransport(proc(upgr: Upgrade): Transport = TorTransport.new(torServer, flags, upgr))
if addresses.len != 0:
builder = builder.withAddresses(addresses)
let switch = builder.withMplex()
.withNoise()
.build()
let torSwitch = T(
peerInfo: switch.peerInfo,
ms: switch.ms,
transports: switch.transports,
connManager: switch.connManager,
peerStore: switch.peerStore,
dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.transports, switch.ms, nil),
nameResolver: nil)
torSwitch.connManager.peerStore = switch.peerStore
return torSwitch
method addTransport*(s: TorSwitch, t: Transport) =
doAssert(false, "not implemented!")
method getTorTransport*(s: TorSwitch): Transport {.base.} =
return s.transports[0]
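A hedged TorSwitch sketch mirroring the client side of the tests added below; torServer is the SOCKS5 address from the earlier sketch, and serverPeerId, serverAddrs and the codec string are placeholders for values learned out of band:

let rng = newRng()
let clientSwitch = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})
# dial a known peer through Tor using a mounted protocol codec
let conn = await clientSwitch.dial(serverPeerId, serverAddrs, "/test/proto/1.0.0")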

View File

@ -35,14 +35,18 @@ logScope:
export transport, websock, results
const
WsTransportTrackerName* = "libp2p.wstransport"
DefaultHeadersTimeout = 3.seconds
type
WsStream = ref object of Connection
session: WSSession
method initStream*(s: WsStream) =
if s.objName.len == 0:
s.objName = "WsStream"
procCall Connection(s).initStream()
proc new*(T: type WsStream,
session: WSSession,
dir: Direction,
@ -129,7 +133,7 @@ method start*(
factories = self.factories,
rng = self.rng)
for i, ma in addrs:
let isWss =
if WSS.match(ma):
@ -256,7 +260,7 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
try:
let
wstransp = await self.wsserver.handleRequest(req)
wstransp = await self.wsserver.handleRequest(req).wait(self.handshakeTimeout)
isSecure = self.httpservers[index].secure
return await self.connHandler(wstransp, isSecure, Direction.In)
@ -273,6 +277,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
debug "AsyncStream Error", exc = exc.msg
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
except AsyncTimeoutError as exc:
debug "Timed out", exc = exc.msg
except TransportUseClosedError as exc:
debug "Server was closed", exc = exc.msg
raise newTransportClosedError(exc)

View File

@ -58,10 +58,7 @@ type
SomeVarint* = PBSomeVarint | LPSomeVarint
SomeUVarint* = PBSomeUVarint | LPSomeUVarint
template toUleb(x: uint64): uint64 = x
template toUleb(x: uint32): uint32 = x
template toUleb(x: uint16): uint16 = x
template toUleb(x: uint8): uint8 = x
template toUleb[T: uint64|uint32|uint16|uint8|uint](x: T): T = x
func toUleb(x: zint64): uint64 =
let v = cast[uint64](x)

View File

@ -20,8 +20,8 @@ markdown_extensions:
- pymdownx.superfences
theme:
logo: https://docs.libp2p.io/images/logo_small.png
favicon: https://docs.libp2p.io/images/logo_small.png
logo: https://libp2p.io/img/logo_small.png
favicon: https://libp2p.io/img/logo_small.png
name: material
features:
- navigation.instant
@ -41,7 +41,7 @@ theme:
nav:
- Tutorials:
- 'Introduction': README.md
- 'Introduction': index.md
- 'Simple connection': tutorial_1_connect.md
- 'Create a custom protocol': tutorial_2_customproto.md
- 'Protobuf': tutorial_3_protobuf.md

View File

@ -2,53 +2,68 @@
"version": 1,
"packages": {
"unittest2": {
"version": "0.0.4",
"vcsRevision": "f180f596c88dfd266f746ed6f8dbebce39c824db",
"url": "https://github.com/status-im/nim-unittest2.git",
"version": "0.0.5",
"vcsRevision": "da8398c45cafd5bd7772da1fc96e3924a18d3823",
"url": "https://github.com/status-im/nim-unittest2",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "fa309c41eaf6ef57895b9e603f2620a2f6e11780"
"sha1": "b3f8493a4948989ef3e645a38b23aad77e851e26"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "6ad35b876fb6ebe0dfee0f697af173acc47906ee",
"url": "https://github.com/status-im/nim-stew.git",
"vcsRevision": "7184d2424dc3945657884646a72715d494917aad",
"url": "https://github.com/status-im/nim-stew",
"downloadMethod": "git",
"dependencies": [],
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "46d58c4feb457f3241e3347778334e325dce5268"
"sha1": "f3125ed2fd126dfd3edbaea14275abd9fa57d703"
}
},
"bearssl": {
"version": "0.1.5",
"vcsRevision": "ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7",
"version": "0.2.0",
"vcsRevision": "a647994910904b0103a05db3a5ec1ecfc4d91a88",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "383abd5becc77bf8e365b780a29d20529e1d9c4c"
"sha1": "d634751df2716ea9975912a2d5d0a090bb6bcfa9"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "689da19e9e9cfff4ced85e2b25c6b2b5598ed079",
"url": "https://github.com/status-im/nim-http-utils.git",
"vcsRevision": "a85bd52ae0a956983ca6b3267c72961d2ec0245f",
"url": "https://github.com/status-im/nim-http-utils",
"downloadMethod": "git",
"dependencies": [
"stew"
"stew",
"unittest2"
],
"checksums": {
"sha1": "4ad3ad68d13c50184180ab4b2eacc0bd7ed2ed44"
"sha1": "92933b21bcd29335f68e377e2b2193fa331e28b3"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "17fed89c99beac5a92d3668d0d3e9b0e4ac13936",
"url": "https://github.com/status-im/nim-chronos.git",
"vcsRevision": "75d030ff71264513fb9701c75a326cd36fcb4692",
"url": "https://github.com/status-im/nim-chronos",
"downloadMethod": "git",
"dependencies": [
"stew",
@ -57,52 +72,27 @@
"unittest2"
],
"checksums": {
"sha1": "f6fffc87571e5f76af2a77c4ebcc0e00909ced4e"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "71e0f0e354e1f4c59e3dc92153989c8b723c3440",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "86da251fe532ef2163da30343688ab1c148c0340"
}
},
"testutils": {
"version": "0.4.2",
"vcsRevision": "aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "94427e0cce0e0c5841edcd3a6530b4e6b857a3cb"
"sha1": "57a674ba3c1a57a694fa7810d93ceb68f338a861"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
"url": "https://github.com/status-im/nim-faststreams.git",
"vcsRevision": "b42daf41d8eb4fbce40add6836bed838f8d85b6f",
"url": "https://github.com/status-im/nim-faststreams",
"downloadMethod": "git",
"dependencies": [
"stew",
"testutils",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
"sha1": "62f7ac8fb200a8ecb9e6c63f5553a7dad66ae613"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "fcd0eadadde0ee000a63df8ab21dc4e9f015a790",
"url": "https://github.com/status-im/nim-serialization.git",
"vcsRevision": "d77417cba6896c26287a68e6a95762e45a1b87e5",
"url": "https://github.com/status-im/nim-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
@ -110,70 +100,72 @@
"stew"
],
"checksums": {
"sha1": "fef59519892cac70cccd81b612085caaa5e3e6cf"
"sha1": "e17244c6654de22254acb9bcf71d8ddbeca8b2aa"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "21e99a2e9d9f80e68bef65c80ef781613005fccb",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "ab1c994bbcd6b04f2500f05d8ea4e463f33dd310"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "24e006df85927f64916e60511620583b11403178",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a4db2105de265930f1578bb7957f49fa39b10d9b"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "c5f0e2465e8375dfc7aa0f56ccef67cb680bc6b0",
"url": "https://github.com/status-im/nim-json-serialization.git",
"vcsRevision": "a7d815ed92f200f490c95d3cfd722089cc923ce6",
"url": "https://github.com/status-im/nim-json-serialization",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "d89d79d0679a3a41b350e3ad4be56c0308cc5ec6"
"sha1": "50fc34a992ef3df68a7bee88af096bb8ed42572f"
}
},
"chronicles": {
"version": "0.10.2",
"vcsRevision": "1682096306ddba8185dcfac360a8c3f952d721e4",
"url": "https://github.com/status-im/nim-chronicles.git",
"version": "0.10.3",
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
"url": "https://github.com/status-im/nim-chronicles",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "9a5bebb76b0f7d587a31e621d260119279e91c76"
}
},
"asynctest": {
"version": "0.3.1",
"vcsRevision": "5347c59b4b057443a014722aa40800cd8bb95c69",
"url": "https://github.com/status-im/asynctest.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "53e0b610d13700296755a4ebe789882cae47a3b9"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
"vcsRevision": "6a6670afba6b97b29b920340e2641978c05ab4d8",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
"sha1": "2621e46369be2a6846713e8c3d681a5bba3e0325"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "73edde4417f7b45003113b7a34212c3ccd95b9fd",
"vcsRevision": "691f069b209d372b1240d5ae1f57fb7bbafeaba7",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
@ -181,36 +173,35 @@
"httputils",
"chronicles",
"stew",
"asynctest",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "ec2b137543f280298ca48de9ed4461a033ba88d3"
"sha1": "c71edfce064e7c0cadde0e687c6edc0caaf9ec07"
}
},
"dnsclient": {
"version": "0.1.2",
"vcsRevision": "fbb76f8af8a33ab818184a7d4406d9fee20993be",
"url": "https://github.com/ba0f3/dnsclient.nim.git",
"version": "0.3.2",
"vcsRevision": "fcd7443634b950eaea574e5eaa00a628ae029823",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "663239a914c814204b30dda6e0902cc0fbd0b8ee"
"sha1": "146aa4a8d512a3a786c5bf54311b79900166d9d7"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
"url": "https://github.com/status-im/nim-secp256k1.git",
"vcsRevision": "fd173fdff863ce2e211cf64c9a03bc7539fe40b0",
"url": "https://github.com/status-im/nim-secp256k1",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
"sha1": "657c79f6f2b1b6da92a9cda81ffc9f95d26443cb"
}
}
}

View File

@ -13,25 +13,24 @@ import ./helpers
type TransportProvider* = proc(): Transport {.gcsafe, raises: [Defect].}
proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
suite name & " common tests":
teardown:
checkTrackers()
template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string = "") =
block:
let transpProvider = prov
asyncTest "can handle local address":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
check transport1.handles(transport1.addrs[0])
await transport1.stop()
asyncTest "e2e: handle observedAddr":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
let conn = await transport1.accept()
@ -56,9 +55,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when there are no issues, this will not wait that long!
asyncTest "e2e: handle write":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
@ -68,7 +67,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
let handlerWait = acceptHandler()
let transport2 = prov()
let transport2 = transpProvider()
let conn = await transport2.dial(transport1.addrs[0])
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@ -84,8 +83,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await handlerWait.wait(1.seconds) # when there are no issues, this will not wait that long!
asyncTest "e2e: handle read":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
@ -97,7 +96,7 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
let handlerWait = acceptHandler()
let transport2 = prov()
let transport2 = transpProvider()
let conn = await transport2.dial(transport1.addrs[0])
await conn.write("Hello!")
@ -110,12 +109,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle dial cancellation":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
let cancellation = transport2.dial(transport1.addrs[0])
await cancellation.cancelAndWait()
@ -127,9 +126,9 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
transport2.stop()))
asyncTest "e2e: handle accept cancellation":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let acceptHandler = transport1.accept()
@ -143,11 +142,11 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
# this randomly locks the Windows CI job
skip()
return
let addrs = @[MultiAddress.init(ma).tryGet(),
MultiAddress.init(ma).tryGet()]
let addrs = @[MultiAddress.init(ma1).tryGet(),
MultiAddress.init(if ma2 == "": ma1 else: ma2).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
@ -192,12 +191,12 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
await transport1.stop()
asyncTest "e2e: stopping transport kills connections":
let ma = @[MultiAddress.init(ma).tryGet()]
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = prov()
let transport1 = transpProvider()
await transport1.start(ma)
let transport2 = prov()
let transport2 = transpProvider()
let acceptHandler = transport1.accept()
let conn = await transport2.dial(transport1.addrs[0])
@ -212,8 +211,8 @@ proc commonTransportTest*(name: string, prov: TransportProvider, ma: string) =
check conn.closed()
asyncTest "read or write on closed connection":
let ma = @[MultiAddress.init(ma).tryGet()]
let transport1 = prov()
let ma = @[MultiAddress.init(ma1).tryGet()]
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =

View File

@ -1,3 +1,5 @@
import ../config.nims
--threads:on
--d:metrics
--d:withoutPCRE

View File

@ -362,3 +362,35 @@ suite "FloodSub":
)
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
inc(messageReceived)
let
bigNode1 = generateNodes(1, maxMessageSize = 20000000)
bigNode2 = generateNodes(1, maxMessageSize = 20000000)
# start switches
nodesFut = await allFinished(
bigNode1[0].switch.start(),
bigNode2[0].switch.start(),
)
await subscribeNodes(bigNode1 & bigNode2)
bigNode2[0].subscribe("foo", handler)
await waitSub(bigNode1[0], bigNode2[0], "foo")
let bigMessage = newSeq[byte](19000000)
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkExpiring: messageReceived == 1
await allFuturesThrowing(
bigNode1[0].switch.stop(),
bigNode2[0].switch.stop()
)
await allFuturesThrowing(nodesFut)

View File

@ -0,0 +1,36 @@
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import chronos
import ../../libp2p/protocols/connectivity/autonat
import ../../libp2p/peerid
import ../../libp2p/multiaddress
type
AutonatStub* = ref object of Autonat
returnSuccess*: bool
dials: int
expectedDials: int
finished*: Future[void]
proc new*(T: typedesc[AutonatStub], expectedDials: int): T =
return T(dials: 0, expectedDials: expectedDials, finished: newFuture[void]())
method dialMe*(
self: AutonatStub,
pid: PeerId,
addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.async.} =
self.dials += 1
if self.dials == self.expectedDials:
self.finished.complete()
if self.returnSuccess:
return MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
else:
raise newException(AutonatUnreachableError, "")

86
tests/stubs/torstub.nim Normal file
View File

@ -0,0 +1,86 @@
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import chronos, stew/[byteutils, endians2, shims/net]
import ../../libp2p/[stream/connection,
protocols/connectivity/relay/utils,
transports/tcptransport,
transports/tortransport,
upgrademngrs/upgrade,
multiaddress,
errors,
builders]
type
TorServerStub* = ref object of RootObj
tcpTransport: TcpTransport
addrTable: Table[string, string]
proc new*(
T: typedesc[TorServerStub]): T {.public.} =
T(
tcpTransport: TcpTransport.new(flags = {ReuseAddr}, upgrade = Upgrade()),
addrTable: initTable[string, string]())
proc registerAddr*(self: TorServerStub, key: string, val: string) =
self.addrTable[key] = val
proc start*(self: TorServerStub, address: TransportAddress) {.async.} =
let ma = @[MultiAddress.init(address).tryGet()]
await self.tcpTransport.start(ma)
var msg = newSeq[byte](3)
while self.tcpTransport.running:
let connSrc = await self.tcpTransport.accept()
await connSrc.readExactly(addr msg[0], 3)
await connSrc.write(@[05'u8, 00])
msg = newSeq[byte](4)
await connSrc.readExactly(addr msg[0], 4)
let atyp = msg[3]
let address = case atyp:
of Socks5AddressType.IPv4.byte:
let n = 4 + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
var ip: array[4, byte]
for i, e in msg[0..^3]:
ip[i] = e
$(ipv4(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
of Socks5AddressType.IPv6.byte:
let n = 16 + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
var ip: array[16, byte]
for i, e in msg[0..^3]:
ip[i] = e
$(ipv6(ip)) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
of Socks5AddressType.FQDN.byte:
await connSrc.readExactly(addr msg[0], 1)
let n = int(uint8.fromBytes(msg[0..0])) + 2 # +2 bytes for the port
msg = newSeq[byte](n)
await connSrc.readExactly(addr msg[0], n)
string.fromBytes(msg[0..^3]) & ":" & $(Port(fromBytesBE(uint16, msg[^2..^1])))
else:
raise newException(LPError, "Address not supported")
let tcpIpAddr = self.addrTable[$(address)]
await connSrc.write(@[05'u8, 00, 00, 01, 00, 00, 00, 00, 00, 00])
let connDst = await self.tcpTransport.dial("", MultiAddress.init(tcpIpAddr).tryGet())
await bridge(connSrc, connDst)
await allFutures(connSrc.close(), connDst.close())
proc stop*(self: TorServerStub) {.async.} =
await self.tcpTransport.stop()

View File

@ -0,0 +1,155 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/options
import chronos, metrics
import unittest2
import ../libp2p/[builders,
switch,
services/autonatservice]
import ./helpers
import stubs/autonatstub
proc createSwitch(autonatSvc: Service = nil): Switch =
var builder = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
.withAutonat()
.withNoise()
if autonatSvc != nil:
builder = builder.withServices(@[autonatSvc])
return builder.build()
suite "Autonat Service":
teardown:
checkTrackers()
asyncTest "Autonat Service Private Reachability test":
let autonatStub = AutonatStub.new(expectedDials = 3)
autonatStub.returnSuccess = false
let autonatService = AutonatService.new(autonatStub, newRng())
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
let switch3 = createSwitch()
let switch4 = createSwitch()
check autonatService.networkReachability() == NetworkReachability.Unknown
await switch1.start()
await switch2.start()
await switch3.start()
await switch4.start()
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
await autonatStub.finished
check autonatService.networkReachability() == NetworkReachability.NotReachable
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3
await allFuturesThrowing(
switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
asyncTest "Autonat Service Public Reachability test":
let autonatStub = AutonatStub.new(expectedDials = 3)
autonatStub.returnSuccess = true
let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds))
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
let switch3 = createSwitch()
let switch4 = createSwitch()
check autonatService.networkReachability() == NetworkReachability.Unknown
await switch1.start()
await switch2.start()
await switch3.start()
await switch4.start()
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
await autonatStub.finished
check autonatService.networkReachability() == NetworkReachability.Reachable
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3
await allFuturesThrowing(
switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
asyncTest "Autonat Service Full Reachability test":
let autonatStub = AutonatStub.new(expectedDials = 6)
autonatStub.returnSuccess = false
let autonatService = AutonatService.new(autonatStub, newRng(), some(1.seconds))
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
let switch3 = createSwitch()
let switch4 = createSwitch()
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatStub.returnSuccess = true
awaiter.complete()
check autonatService.networkReachability() == NetworkReachability.Unknown
autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)
await switch1.start()
await switch2.start()
await switch3.start()
await switch4.start()
await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs)
await switch1.connect(switch3.peerInfo.peerId, switch3.peerInfo.addrs)
await switch1.connect(switch4.peerInfo.peerId, switch4.peerInfo.addrs)
await awaiter
check autonatService.networkReachability() == NetworkReachability.NotReachable
check libp2p_autonat_reachability_confidence.value(["NotReachable"]) == 0.3
await autonatStub.finished
check autonatService.networkReachability() == NetworkReachability.Reachable
check libp2p_autonat_reachability_confidence.value(["Reachable"]) == 0.3
await allFuturesThrowing(switch1.stop(), switch2.stop(), switch3.stop(), switch4.stop())
asyncTest "Autonat Service setup and stop twice":
let switch = createSwitch()
let autonatService = AutonatService.new(AutonatStub.new(expectedDials = 0), newRng(), some(1.seconds))
check (await autonatService.setup(switch)) == true
check (await autonatService.setup(switch)) == false
check (await autonatService.stop(switch)) == true
check (await autonatService.stop(switch)) == false
await allFuturesThrowing(switch.stop())

View File

@ -152,8 +152,8 @@ suite "Identify":
identifyPush2 {.threadvar.}: IdentifyPush
conn {.threadvar.}: Connection
asyncSetup:
switch1 = newStandardSwitch()
switch2 = newStandardSwitch()
switch1 = newStandardSwitch(sendSignedPeerRecord=true)
switch2 = newStandardSwitch(sendSignedPeerRecord=true)
proc updateStore1(peerId: PeerId, info: IdentifyInfo) {.async.} =
switch1.peerStore.updatePeerInfo(info)
@ -177,13 +177,21 @@ suite "Identify":
check:
switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs
switch1.peerStore[KeyBook][switch2.peerInfo.peerId] == switch2.peerInfo.publicKey
switch2.peerStore[KeyBook][switch1.peerInfo.peerId] == switch1.peerInfo.publicKey
switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
switch2.peerStore[AddressBook][switch1.peerInfo.peerId] == switch1.peerInfo.addrs
switch1.peerStore[AgentBook][switch2.peerInfo.peerId] == switch2.peerInfo.agentVersion
switch2.peerStore[AgentBook][switch1.peerInfo.peerId] == switch1.peerInfo.agentVersion
#switch1.peerStore.signedPeerRecordBook.get(switch2.peerInfo.peerId) == switch2.peerInfo.signedPeerRecord.get()
#switch2.peerStore.signedPeerRecordBook.get(switch1.peerInfo.peerId) == switch1.peerInfo.signedPeerRecord.get()
# no longer sent by default
switch1.peerStore[ProtoVersionBook][switch2.peerInfo.peerId] == switch2.peerInfo.protoVersion
switch2.peerStore[ProtoVersionBook][switch1.peerInfo.peerId] == switch1.peerInfo.protoVersion
switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
switch2.peerStore[ProtoBook][switch1.peerInfo.peerId] == switch1.peerInfo.protocols
switch1.peerStore[SPRBook][switch2.peerInfo.peerId] == switch2.peerInfo.signedPeerRecord.envelope
switch2.peerStore[SPRBook][switch1.peerInfo.peerId] == switch1.peerInfo.signedPeerRecord.envelope
proc closeAll() {.async.} =
await conn.close()

View File

@ -623,18 +623,27 @@ suite "MinProtobuf test suite":
test "[length] too big message test":
var pb1 = initProtoBuffer()
var bigString = newString(MaxMessageSize + 1)
var bigString = newString(pb1.maxSize + 1)
for i in 0 ..< len(bigString):
bigString[i] = 'A'
pb1.write(1, bigString)
pb1.finish()
var pb2 = initProtoBuffer(pb1.buffer)
var value = newString(MaxMessageSize + 1)
var valueLen = 0
let res = pb2.getField(1, value, valueLen)
check:
res.isErr() == true
block:
var pb2 = initProtoBuffer(pb1.buffer)
var value = newString(pb1.maxSize + 1)
var valueLen = 0
let res = pb2.getField(1, value, valueLen)
check:
res.isErr() == true
block:
var pb2 = initProtoBuffer(pb1.buffer, maxSize = uint.high)
var value = newString(pb1.maxSize + 1)
var valueLen = 0
let res = pb2.getField(1, value, valueLen)
check:
res.isErr() == false
test "[length] Repeated field test":
var pb1 = initProtoBuffer()

View File

@ -278,6 +278,79 @@ suite "Multistream select":
await handlerWait.wait(30.seconds)
asyncTest "e2e - streams limit":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
let blocker = newFuture[void]()
# Start 5 streams which are blocked by `blocker`
# Try to start a new one, which should fail
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
var protocol: LPProtocol = LPProtocol.new(
@["/test/proto/1.0.0"],
testHandler,
maxIncomingStreams = 5
)
protocol.handler = testHandler
let msListen = MultistreamSelect.new()
msListen.addHandler("/test/proto/1.0.0", protocol)
let transport1 = TcpTransport.new(upgrade = Upgrade())
await transport1.start(ma)
proc acceptedOne(c: Connection) {.async.} =
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
var handlerWait = acceptHandler()
let msDial = MultistreamSelect.new()
let transport2 = TcpTransport.new(upgrade = Upgrade())
proc connector {.async.} =
let conn = await transport2.dial(transport1.addrs[0])
check: (await msDial.select(conn, "/test/proto/1.0.0")) == true
check: string.fromBytes(await conn.readLp(1024)) == "Hello!"
await conn.close()
# Fill up the 5 allowed streams
var dialers: seq[Future[void]]
for _ in 0..<5:
dialers.add(connector())
# This one will fail during negotiation
expect(CatchableError):
try: waitFor(connector().wait(1.seconds))
except AsyncTimeoutError as exc:
check false
raise exc
# check that the dialers aren't finished
check: (await dialers[0].withTimeout(10.milliseconds)) == false
# unblock the dialers
blocker.complete()
await allFutures(dialers)
# now it must work
waitFor(connector())
await transport2.stop()
await transport1.stop()
await handlerWait.cancelAndWait()
asyncTest "e2e - ls":
let ma = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]

View File

@ -23,6 +23,7 @@ import testmultibase,
testrouting_record
import testtcptransport,
testtortransport,
testnameresolve,
testwstransport,
testmultistream,
@ -41,4 +42,5 @@ import testtcptransport,
testdiscovery,
testyamux,
testautonat,
testautonatservice,
testautorelay

View File

@ -12,7 +12,7 @@
import unittest2
import nimcrypto/utils, stew/base58
import ../libp2p/crypto/crypto, ../libp2p/peerid
import bearssl/hash
when defined(nimHasUsed): {.used.}
const
@ -236,3 +236,15 @@ suite "Peer testing suite":
ekey2 == pubkey
ekey3 == pubkey
ekey4 == pubkey
test "Test PeerId.random() proc":
# generate a random peer with a deterministic seed
var rng = (ref HmacDrbgContext)()
hmacDrbgInit(rng[], addr sha256Vtable, nil, 0)
var randomPeer1 = PeerId.random(rng)
check:
$randomPeer1.get() == "16Uiu2HAmCxpSTFDNdWiu1MLScu7inPhcbbGfPvuvRPD1e51gw1Xr"
# generate a random peer with a new random seed
var randomPeer2 = PeerId.random()
check:
randomPeer2.isErr() != true

View File

@ -10,6 +10,7 @@ import ../libp2p/[errors,
builders,
stream/bufferstream,
stream/connection,
multicodec,
multiaddress,
peerinfo,
crypto/crypto,
@ -213,12 +214,40 @@ suite "Switch":
"dnsaddr=" & $switch1.peerInfo.addrs[0] & "/p2p/" & $switch1.peerInfo.peerId,
]
check: (await switch2.connect(@[MultiAddress.init("/dnsaddr/test.io/").tryGet()])) == switch1.peerInfo.peerId
check: (await switch2.connect(MultiAddress.init("/dnsaddr/test.io/").tryGet(), true)) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
# via direct ip
check not switch2.isConnected(switch1.peerInfo.peerId)
check: (await switch2.connect(switch1.peerInfo.addrs)) == switch1.peerInfo.peerId
check: (await switch2.connect(switch1.peerInfo.addrs[0], true)) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
await allFuturesThrowing(
switch1.stop(),
switch2.stop()
)
asyncTest "e2e connect to peer with known PeerId":
let switch1 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
let switch2 = newStandardSwitch(secureManagers = [SecureProtocol.Noise])
await switch1.start()
await switch2.start()
# via direct ip
check not switch2.isConnected(switch1.peerInfo.peerId)
# without specifying allow unknown, will fail
expect(DialFailedError):
discard await switch2.connect(switch1.peerInfo.addrs[0])
# with invalid PeerId, will fail
let fakeMa = concat(switch1.peerInfo.addrs[0], MultiAddress.init(multiCodec("p2p"), PeerId.random.tryGet().data).tryGet()).tryGet()
expect(CatchableError):
discard (await switch2.connect(fakeMa))
# real thing works
check (await switch2.connect(switch1.peerInfo.fullAddrs.tryGet()[0])) == switch1.peerInfo.peerId
await switch2.disconnect(switch1.peerInfo.peerId)
@ -1016,3 +1045,13 @@ suite "Switch":
expect LPError:
await switch.start()
# test is that this doesn't leak
asyncTest "starting two times does not crash":
let switch = newStandardSwitch(
addrs = @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]
)
await switch.start()
await switch.start()
await allFuturesThrowing(switch.stop())

View File

@ -125,7 +125,8 @@ suite "TCP transport":
server.close()
await server.join()
proc transProvider(): Transport = TcpTransport.new(upgrade = Upgrade())
commonTransportTest(
"TcpTransport",
proc (): Transport = TcpTransport.new(upgrade = Upgrade()),
transProvider,
"/ip4/0.0.0.0/tcp/0")

143
tests/testtortransport.nim Normal file
View File

@ -0,0 +1,143 @@
{.used.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import tables
import chronos, stew/[byteutils]
import ../libp2p/[stream/connection,
transports/tcptransport,
transports/tortransport,
upgrademngrs/upgrade,
multiaddress,
builders]
import ./helpers, ./stubs/torstub, ./commontransport
const torServer = initTAddress("127.0.0.1", 9050.Port)
var stub: TorServerStub
var startFut: Future[void]
suite "Tor transport":
setup:
stub = TorServerStub.new()
stub.registerAddr("127.0.0.1:8080", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("libp2p.nim:8080", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("::1:8080", "/ip6/::1/tcp/8080")
stub.registerAddr("a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad.onion:80", "/ip4/127.0.0.1/tcp/8080")
stub.registerAddr("a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcae.onion:81", "/ip4/127.0.0.1/tcp/8081")
startFut = stub.start(torServer)
teardown:
waitFor startFut.cancelAndWait()
waitFor stub.stop()
checkTrackers()
proc test(listenAddr: string, dialAddr: string) {.async.} =
let server = TcpTransport.new({ReuseAddr}, Upgrade())
let ma2 = @[MultiAddress.init(listenAddr).tryGet()]
await server.start(ma2)
proc runClient() {.async.} =
let client = TorTransport.new(transportAddress = torServer, upgrade = Upgrade())
let conn = await client.dial("", MultiAddress.init(dialAddr).tryGet())
await conn.write("client")
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
await conn.close()
check string.fromBytes(resp) == "server"
await client.stop()
proc serverAcceptHandler() {.async, gcsafe.} =
let conn = await server.accept()
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "client"
await conn.write("server")
await conn.close()
await server.stop()
asyncSpawn serverAcceptHandler()
await runClient()
asyncTest "test start and dial using ipv4":
await test("/ip4/127.0.0.1/tcp/8080", "/ip4/127.0.0.1/tcp/8080")
asyncTest "test start and dial using ipv6":
await test("/ip6/::1/tcp/8080", "/ip6/::1/tcp/8080")
asyncTest "test start and dial using dns":
await test("/ip4/127.0.0.1/tcp/8080", "/dns/libp2p.nim/tcp/8080")
asyncTest "test start and dial usion onion3 and builder":
const TestCodec = "/test/proto/1.0.0" # custom protocol string identifier
type
TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "client"
await conn.write("server")
# We must close the connections ourselves when we're done with it
await conn.close()
return T(codecs: @[TestCodec], handler: handle)
let rng = newRng()
let ma = MultiAddress.init("/ip4/127.0.0.1/tcp/8080/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad:80").tryGet()
let serverSwitch = TorSwitch.new(torServer, rng, @[ma], {ReuseAddr})
# setup the custom proto
let testProto = TestProto.new()
serverSwitch.mount(testProto)
await serverSwitch.start()
let serverPeerId = serverSwitch.peerInfo.peerId
let serverAddress = serverSwitch.peerInfo.addrs
proc startClient() {.async.} =
let clientSwitch = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})
let conn = await clientSwitch.dial(serverPeerId, serverAddress, TestCodec)
await conn.write("client")
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
check string.fromBytes(resp) == "server"
await conn.close()
await clientSwitch.stop()
await startClient()
await serverSwitch.stop()
test "It's not possible to add another transport in TorSwitch":
when (NimMajor, NimMinor, NimPatch) < (1, 4, 0):
type AssertionDefect = AssertionError
let torSwitch = TorSwitch.new(torServer = torServer, rng = rng, flags = {ReuseAddr})
expect(AssertionDefect):
torSwitch.addTransport(TcpTransport.new(upgrade = Upgrade()))
waitFor torSwitch.stop()
proc transProvider(): Transport =
TorTransport.new(torServer, {ReuseAddr}, Upgrade())
commonTransportTest(
transProvider,
"/ip4/127.0.0.1/tcp/8080/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcad:80",
"/ip4/127.0.0.1/tcp/8081/onion3/a2mncbqsbullu7thgm4e6zxda2xccmcgzmaq44oayhdtm6rav5vovcae:81")

View File

@ -55,14 +55,13 @@ suite "WebSocket transport":
teardown:
checkTrackers()
commonTransportTest(
"WebSocket",
proc (): Transport = WsTransport.new(Upgrade()),
"/ip4/0.0.0.0/tcp/0/ws")
proc wsTraspProvider(): Transport = WsTransport.new(Upgrade())
commonTransportTest(
"WebSocket Secure",
(proc (): Transport {.gcsafe.} =
wsTraspProvider,
"/ip4/0.0.0.0/tcp/0/ws")
proc wsSecureTranspProvider(): Transport {.gcsafe.} =
try:
return WsTransport.new(
Upgrade(),
@ -70,7 +69,9 @@ suite "WebSocket transport":
TLSCertificate.init(SecureCert),
{TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName})
except Exception: check(false)
),
commonTransportTest(
wsSecureTranspProvider,
"/ip4/0.0.0.0/tcp/0/wss")
asyncTest "Hostname verification":

View File

@ -13,8 +13,15 @@ createDir("nimbledeps")
discard execCmd("nimble install -dy")
var allDeps: Table[string, string]
for (_, dependency) in walkDir("nimbledeps/pkgs"):
let fileContent = parseJson(readFile(dependency & "/nimblemeta.json"))
let nimblePkgs =
if dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for (_, dependency) in walkDir(nimblePkgs):
let
jsonContent = parseJson(readFile(dependency & "/nimblemeta.json"))
fileContent =
if "metaData" in jsonContent: jsonContent["metaData"]
else: jsonContent
let url = fileContent.getOrDefault("url").getStr("")
var version = fileContent.getOrDefault("vcsRevision").getStr("")
var packageName = dependency.split('/')[^1].split('-')[0]