Mirror of https://github.com/logos-storage/logos-storage-nim-dht.git
Synced 2026-01-02 13:33:08 +00:00

Compare commits
84 Commits:

754765ba31, 99884b5971, 6c7de03622, f6eef1ac95, fb17db8187, d435c6945f,
89d22c156e, ee33946afb, c777106e7f, 14d4dd97e9, a9e17f4a33, bc27eebb85,
0f67d21bbc, 4bd3a39e00, 5f22be0420, 4eb4e9126a, 5320e8c81e, cc54a4f0ec,
e7e45de75f, a3f203bbea, de39c2006e, cafb6ffe53, 570fb9a936, 9fdf0eca8a,
d73dc48515, 57f4b6f7cb, ee4e2102d9, a6cfe1a084, 1a344f1fd7, fee5a9ced2,
6310c50ce0, 7507e99c96, 02bc12e639, e1c1089e4f, c1d2ea410d, 8b1660464d,
7057663f81, ff5391a35e, 4ccaaee721, 80cc069c5e, ffeeeeb3fb, 4d2250477e,
b7b04ed9e4, 6e180af4aa, 706cb50041, 0825d887ea, ec4f0d4a84, 0b69de242f,
f3eec2a202, f6971cc947, 4d9e39d86c, b8bcb2d08d, f121d080e7, fef297c622,
936a5ec6fa, 9acdca795b, 5f38fd9570, 5624700855, 76da855725, 4c9c92232b,
148b10908d, f299c23e2e, bdf57381e3, 4b82bdc2f9, d8160ff0f7, f766cb39b1,
316464fc71, 6e61e02091, dfff39091b, 63822e8356, 2299317116, 717cd0a50c,
223ce9240b, 709a873862, b3d01245e9, beefafcc6f, a7f14bc9b7, dd4985435a,
91b2eaec89, 66116b9bf6, 60dc4e764c, ee5d8acb05, ed7caa119d, fdd02450aa
.github/actions/install_nimble/action.yml (vendored, 42 lines removed)

```diff
@@ -1,42 +0,0 @@
-name: Install Nimble
-description: install nimble
-inputs:
-  nimble_version:
-    description: "install nimble"
-    # TODO: make sure to change to tagged release when available
-    default: "latest"
-  os:
-    description: "operating system"
-    default: "linux"
-  cpu:
-    description: "cpu architecture"
-    default: "amd64"
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/checkout@v3
-    - name: Build Nimble
-      shell: bash
-      run: |
-        set -x
-        mkdir -p .nimble
-        cd .nimble
-        if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
-          CPU=x64
-        elif [[ '${{ inputs.cpu }}' == 'i386' ]]; then
-          CPU=x32
-        else
-          CPU=${{ inputs.cpu }}
-        fi
-
-        if [[ '${{ inputs.os }}' == 'macos' ]]; then
-          OS=apple
-        else
-          OS='${{ inputs.os }}'
-        fi
-        URL=https://github.com/nim-lang/nimble/releases/download/${{ inputs.nimble_version }}/nimble-"$OS"_"$CPU".tar.gz
-        curl -o nimble.tar.gz -L -s -S "$URL"
-        tar -xvf nimble.tar.gz
-    - name: Derive environment variables
-      shell: bash
-      run: echo '${{ github.workspace }}/.nimble/' >> $GITHUB_PATH
```
.github/workflows/ci.yml (vendored, 134 lines replaced by 22)

```diff
@@ -1,134 +1,22 @@
 name: CI
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-  workflow_dispatch:
+on: [push, pull_request]

 jobs:
-  build:
-    timeout-minutes: 90
+  test:
+    runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        target:
-          - os: linux
-            cpu: amd64
-            tests: all
-          - os: macos
-            cpu: amd64
-            tests: all
-          - os: windows
-            cpu: amd64
-            tests: part1
-          - os: windows
-            cpu: amd64
-            tests: part2
-        branch: [version-1-6]
-        include:
-          - target:
-              os: linux
-            builder: ubuntu-20.04
-            shell: bash
-          - target:
-              os: macos
-            builder: macos-12
-            shell: bash
-          - target:
-              os: windows
-            builder: windows-latest
-            shell: msys2 {0}
-
-    defaults:
-      run:
-        shell: ${{ matrix.shell }}
-
-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
-    runs-on: ${{ matrix.builder }}
-    continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
+        nim: [2.2.4]
+        os: [ubuntu-latest, macos-latest, windows-latest]
     steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: true
-
-      - name: MSYS2 (Windows amd64)
-        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
-        uses: msys2/setup-msys2@v2
-        with:
-          path-type: inherit
-          install: >-
-            base-devel
-            git
-            mingw-w64-x86_64-toolchain
-      - name: Restore Nim DLLs dependencies (Windows) from cache
-        if: runner.os == 'Windows'
-        id: windows-dlls-cache
-        uses: actions/cache@v2
-        with:
-          path: external/dlls
-          key: 'dlls'
-
-      - name: Install DLL dependencies (Windows)
-        if: >
-          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
-          runner.os == 'Windows'
-        run: |
-          mkdir external
-          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
-          7z x external/windeps.zip -oexternal/dlls
-      - name: Path to cached dependencies (Windows)
-        if: >
-          runner.os == 'Windows'
-        run: |
-          echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
-
-      ## Restore nimble deps
-      - name: Restore nimble dependencies from cache
-        id: nimble_deps
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.nimble
-            ${{ github.workspace }}/.nimble
-          key: ${{ matrix.builder }}-${{ matrix.target.cpu }}-dotnimble-${{ hashFiles('nimble.lock') }}
-
-      - name: Setup Nimble
-        uses: "./.github/actions/install_nimble"
-        with:
-          os: ${{ matrix.target.os }}
-          cpu: ${{ matrix.target.cpu }}
-
-      - name: Setup Env
-        run: |
-          nimble -v
-
-      - name: Setup Deps
-        run: |
-          nimble install -d
-          nimble setup
-
-      - name: Run tests
-        if: runner.os != 'Windows'
-        run: |
-          nimble test -y
-
-      - name: Run windows tests part1
-        if: runner.os == 'Windows' && matrix.target.tests == 'part1'
-        run: |
-          if [[ "${{ matrix.target.os }}" == "windows" ]]; then
-            # https://github.com/status-im/nimbus-eth2/issues/3121
-            export NIMFLAGS="-d:nimRawSetjmp"
-          fi
-
-          nimble testPart1 -y
-
-      - name: Run windows tests part2
-        if: runner.os == 'Windows' && matrix.target.tests == 'part2'
-        run: |
-          if [[ "${{ matrix.target.os }}" == "windows" ]]; then
-            export NIMFLAGS="-d:nimRawSetjmp"
-          fi
-
-          nimble testPart2 -y
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: jiro4989/setup-nim-action@v2
+        with:
+          nim-version: ${{matrix.nim}}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build
+        run: nimble install -y
+      - name: Test
+        run: nimble test -y
```
.github/workflows/codecov.yml (vendored, 69 lines removed)

```diff
@@ -1,69 +0,0 @@
-name: Generate and upload code coverage
-
-on:
-  #On push to common branches, this computes the "bases stats" for PRs
-  push:
-    branches:
-      - main
-  pull_request:
-  workflow_dispatch:
-
-jobs:
-  All_Tests:
-    name: All tests
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        nim-options: [
-          ""
-        ]
-        test-program: [
-          "test"
-        ]
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-
-      - name: Environment setup
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y lcov build-essential git curl
-          mkdir coverage
-
-      - name: Restore nimble dependencies from cache
-        id: nimble_deps
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.nimble
-          key: ubuntu-20.04-amd64-${{ hashFiles('nimble.lock') }}
-
-      - name: Setup Nimble
-        uses: "./.github/actions/install_nimble"
-        with:
-          os: linux
-          cpu: x64
-
-      - name: Setup Env
-        run: |
-          nimble -v
-
-      - name: Setup Deps
-        run: |
-          nimble install -d
-          nimble setup
-
-      - name: Run tests
-        run: |
-          nimble -y --verbose coverage
-
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
-        with:
-          directory: ./coverage/
-          fail_ci_if_error: true
-          files: ./coverage/coverage.f.info
-          flags: unittests
-          name: codecov-umbrella
-          verbose: true
```
.gitignore (vendored, 2 lines added)

```diff
@@ -12,3 +12,5 @@ vendor/*
 NimBinaries
 .update.timestamp
 *.dSYM
+.vscode/*
+nimbledeps
```
README:

```diff
@@ -1,12 +1,12 @@
-# A DHT implementation for Codex
+# A DHT implementation for Logos Storage

 [](https://opensource.org/licenses/Apache-2.0)
 [](https://opensource.org/licenses/MIT)
 [](#stability)
-[](https://github.com/status-im/nim-libp2p-dht/actions?query=workflow%3ACI+branch%3Amain)
-[](https://codecov.io/gh/status-im/nim-libp2p-dht)
+[](https://github.com/logos-storage/logos-storage-nim-dht/actions/workflows/ci.yml?query=workflow%3ACI+branch%3Amaster)
+[](https://codecov.io/gh/logos-storage/logos-storage-nim-dht)

-This DHT implementation is aiming to provide a DHT for Codex with the following properties
+This DHT implementation is aiming to provide a DHT for Logos Storage with the following properties
 * flexible secure transport usage with
   * fast UDP based operation
   * eventual fallback to TCP-based operation (maybe though libp2p)
```
atlas.lock (144 lines removed)

```diff
@@ -1,144 +0,0 @@
-{
-  "items": {
-    "nim-zlib": {
-      "dir": "vendor/nim-zlib",
-      "url": "https://github.com/status-im/nim-zlib",
-      "commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93"
-    },
-    "nim-stew": {
-      "dir": "vendor/nim-stew",
-      "url": "https://github.com/status-im/nim-stew",
-      "commit": "7afe7e3c070758cac1f628e4330109f3ef6fc853"
-    },
-    "nim-http-utils": {
-      "dir": "vendor/nim-http-utils",
-      "url": "https://github.com/status-im/nim-http-utils",
-      "commit": "3b491a40c60aad9e8d3407443f46f62511e63b18"
-    },
-    "nim-chronos": {
-      "dir": "vendor/nim-chronos",
-      "url": "https://github.com/status-im/nim-chronos",
-      "commit": "0277b65be2c7a365ac13df002fba6e172be55537"
-    },
-    "upraises": {
-      "dir": "vendor/upraises",
-      "url": "https://github.com/markspanbroek/upraises",
-      "commit": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2"
-    },
-    "nim-sqlite3-abi": {
-      "dir": "vendor/nim-sqlite3-abi",
-      "url": "https://github.com/arnetheduck/nim-sqlite3-abi",
-      "commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3"
-    },
-    "questionable": {
-      "dir": "vendor/questionable",
-      "url": "https://github.com/codex-storage/questionable",
-      "commit": "b3cf35ac450fd42c9ea83dc084f5cba2efc55da3"
-    },
-    "nim-results": {
-      "dir": "vendor/nim-results",
-      "url": "https://github.com/arnetheduck/nim-results",
-      "commit": "f3c666a272c69d70cb41e7245e7f6844797303ad"
-    },
-    "nim-unittest2": {
-      "dir": "vendor/nim-unittest2",
-      "url": "https://github.com/status-im/nim-unittest2",
-      "commit": "b178f47527074964f76c395ad0dfc81cf118f379"
-    },
-    "nim-websock": {
-      "dir": "vendor/nim-websock",
-      "url": "https://github.com/status-im/nim-websock",
-      "commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8"
-    },
-    "nim-secp256k1": {
-      "dir": "vendor/nim-secp256k1",
-      "url": "https://github.com/status-im/nim-secp256k1",
-      "commit": "2acbbdcc0e63002a013fff49f015708522875832"
-    },
-    "nim-bearssl": {
-      "dir": "vendor/nim-bearssl",
-      "url": "https://github.com/status-im/nim-bearssl",
-      "commit": "99fcb3405c55b27cfffbf60f5368c55da7346f23"
-    },
-    "dnsclient.nim": {
-      "dir": "vendor/dnsclient.nim",
-      "url": "https://github.com/ba0f3/dnsclient.nim",
-      "commit": "23214235d4784d24aceed99bbfe153379ea557c8"
-    },
-    "nimcrypto": {
-      "dir": "vendor/nimcrypto",
-      "url": "https://github.com/status-im/nimcrypto",
-      "commit": "24e006df85927f64916e60511620583b11403178"
-    },
-    "nim-json-serialization": {
-      "dir": "vendor/nim-json-serialization",
-      "url": "https://github.com/status-im/nim-json-serialization",
-      "commit": "bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3"
-    },
-    "nim-testutils": {
-      "dir": "vendor/nim-testutils",
-      "url": "https://github.com/status-im/nim-testutils",
-      "commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815"
-    },
-    "npeg": {
-      "dir": "vendor/npeg",
-      "url": "https://github.com/zevv/npeg",
-      "commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f"
-    },
-    "nim-serialization": {
-      "dir": "vendor/nim-serialization",
-      "url": "https://github.com/status-im/nim-serialization",
-      "commit": "384eb2561ee755446cff512a8e057325848b86a7"
-    },
-    "nim-faststreams": {
-      "dir": "vendor/nim-faststreams",
-      "url": "https://github.com/status-im/nim-faststreams",
-      "commit": "720fc5e5c8e428d9d0af618e1e27c44b42350309"
-    },
-    "nim-datastore": {
-      "dir": "vendor/nim-datastore",
-      "url": "https://github.com/codex-storage/nim-datastore",
-      "commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa"
-    },
-    "asynctest": {
-      "dir": "vendor/asynctest",
-      "url": "https://github.com/codex-storage/asynctest",
-      "commit": "fe1a34caf572b05f8bdba3b650f1871af9fce31e"
-    },
-    "nim-stint": {
-      "dir": "vendor/nim-stint",
-      "url": "https://github.com/status-im/nim-stint",
-      "commit": "86621eced1dcfb5e25903019ebcfc76ed9128ec5"
-    },
-    "nim-metrics": {
-      "dir": "vendor/nim-metrics",
-      "url": "https://github.com/status-im/nim-metrics",
-      "commit": "6142e433fc8ea9b73379770a788017ac528d46ff"
-    },
-    "nim-libp2p": {
-      "dir": "vendor/nim-libp2p",
-      "url": "https://github.com/status-im/nim-libp2p",
-      "commit": "440461b24b9e66542b34d26a0b908c17f6549d05"
-    },
-    "nim-chronicles": {
-      "dir": "vendor/nim-chronicles",
-      "url": "https://github.com/status-im/nim-chronicles",
-      "commit": "c9c8e58ec3f89b655a046c485f622f9021c68b61"
-    },
-    "nim-protobuf-serialization": {
-      "dir": "vendor/nim-protobuf-serialization",
-      "url": "https://github.com/status-im/nim-protobuf-serialization",
-      "commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6"
-    }
-  },
-  "nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-results\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n",
-  "nimbleFile": {
-    "filename": "",
-    "content": "# Package\n\nversion = \"0.3.2\"\nauthor = \"Status Research & Development GmbH\"\ndescription = \"DHT based on the libp2p Kademlia spec\"\nlicense = \"MIT\"\nskipDirs = @[\"tests\"]\n\n\n# Dependencies\nrequires \"nim >= 1.2.0\"\nrequires \"secp256k1#2acbbdcc0e63002a013fff49f015708522875832\" # >= 0.5.2 & < 0.6.0\nrequires \"protobuf_serialization\" # >= 0.2.0 & < 0.3.0\nrequires \"nimcrypto == 0.5.4\"\nrequires \"bearssl#head\"\nrequires \"chronicles >= 0.10.2 & < 0.11.0\"\nrequires \"chronos == 3.2.0\" # >= 3.0.11 & < 3.1.0\nrequires \"libp2p#unstable\"\nrequires \"metrics\"\nrequires \"stew#head\"\nrequires \"stint\"\nrequires \"asynctest >= 0.3.1 & < 0.4.0\"\nrequires \"https://github.com/codex-storage/nim-datastore#head\"\nrequires \"questionable\"\n\ninclude \"build.nims\"\n\n"
-  },
-  "hostOS": "macosx",
-  "hostCPU": "arm64",
-  "nimVersion": "1.6.14",
-  "gccVersion": "",
-  "clangVersion": ""
-}
```
codecov.yml (22 lines removed)

```diff
@@ -1,22 +0,0 @@
-coverage:
-  status:
-    project:
-      default:
-        # advanced settings
-
-        # Prevents PR from being blocked with a reduction in coverage.
-        # Note, if we want to re-enable this, a `threshold` value can be used
-        # allow coverage to drop by x% while still posting a success status.
-        # `informational`: https://docs.codecov.com/docs/commit-status#informational
-        # `threshold`: https://docs.codecov.com/docs/commit-status#threshold
-        informational: true
-    patch:
-      default:
-        # advanced settings
-
-        # Prevents PR from being blocked with a reduction in coverage.
-        # Note, if we want to re-enable this, a `threshold` value can be used
-        # allow coverage to drop by x% while still posting a success status.
-        # `informational`: https://docs.codecov.com/docs/commit-status#informational
-        # `threshold`: https://docs.codecov.com/docs/commit-status#threshold
-        informational: true
```
Package .nimble file:

```diff
@@ -1,26 +1,42 @@
 # Package

-version = "0.3.2"
+version = "0.6.0"
 author = "Status Research & Development GmbH"
 description = "DHT based on Eth discv5 implementation"
 license = "MIT"
 skipDirs = @["tests"]
+installFiles = @["build.nims"]

 # Dependencies
-requires "nim >= 1.2.0"
-requires "secp256k1#2acbbdcc0e63002a013fff49f015708522875832" # >= 0.5.2 & < 0.6.0
-requires "protobuf_serialization" # >= 0.2.0 & < 0.3.0
-requires "nimcrypto == 0.5.4"
-requires "bearssl#head"
-requires "chronicles >= 0.10.2 & < 0.11.0"
-requires "chronos == 3.2.0" # >= 3.0.11 & < 3.1.0
-requires "libp2p#unstable"
-requires "metrics"
-requires "stew#head"
-requires "stint"
-requires "asynctest >= 0.3.1 & < 0.4.0"
-requires "https://github.com/codex-storage/nim-datastore#head"
-requires "questionable"
+requires "nim >= 2.2.4 & < 3.0.0"
+requires "secp256k1 >= 0.6.0 & < 0.7.0"
+requires "nimcrypto >= 0.6.2 & < 0.8.0"
+requires "bearssl >= 0.2.5 & < 0.3.0"
+requires "chronicles >= 0.11.2 & < 0.13.0"
+requires "chronos >= 4.0.4 & < 4.1.0"
+requires "libp2p >= 1.14.1 & < 2.0.0"
+requires "metrics >= 0.1.0 & < 0.2.0"
+requires "stew >= 0.4.2"
+requires "stint >= 0.8.1 & < 0.9.0"
+requires "https://github.com/logos-storage/nim-datastore >= 0.2.1 & < 0.3.0"
+requires "questionable >= 0.10.15 & < 0.11.0"
+requires "leveldbstatic >= 0.2.1 & < 0.3.0"

 include "build.nims"
+task testAll, "Run all test suites":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testAll"
+
+task test, "Run the test suite":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble test"
+
+task testPart1, "Run the test suite part 1":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testPart1"
+
+task testPart2, "Run the test suite part 2":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testPart2"
```
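The tasks added above all follow the same NimScript pattern. As a minimal standalone illustration (the `testFast` task name and `testfast.nim` entry point are hypothetical, not part of this repository):

```nim
# In a .nimble file, tasks are NimScript blocks invoked as `nimble testFast`.
task testFast, "Run only the fast tests":
  exec "nimble install -d -y"     # make dependencies available first
  withDir "tests":                # run from the tests directory
    exec "nim c -r testfast.nim"  # hypothetical test entry point
```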
Crypto module:

```diff
@@ -1,13 +1,13 @@
 import
   std/sugar,
   libp2p/crypto/[crypto, secp],
-  stew/[byteutils, objects, results, ptrops]
+  stew/[byteutils, objects, ptrops],
+  results

 # from secp256k1 import ecdh, SkEcdhSecretSize, toRaw, SkSecretKey, SkPublicKey
 import secp256k1

 const
-  KeyLength* = SkEcdhSecretSize
+  KeyLength* = secp256k1.SkEcdhSecretSize
     ## Ecdh shared secret key length without leading byte
     ## (publicKey * privateKey).x, where length of x is 32 bytes
@@ -25,12 +25,12 @@ type
     data*: array[FullKeyLength, byte]

 proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
-  let skKey = ? SkPrivateKey.init(data).mapErr(e =>
+  let skKey = ? secp.SkPrivateKey.init(data).mapErr(e =>
     ("Failed to init private key from hex string: " & $e).cstring)
   ok PrivateKey.init(skKey)

 proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
-  let skKey = ? SkPublicKey.init(data).mapErr(e =>
+  let skKey = ? secp.SkPublicKey.init(data).mapErr(e =>
     ("Failed to init public key from hex string: " & $e).cstring)
   ok PublicKey.init(skKey)
@@ -45,14 +45,17 @@ proc ecdhSharedSecretHash(output: ptr byte, x32, y32: ptr byte, data: pointer):
   ## Take the `x32` part as ecdh shared secret.
   ## output length is derived from x32 length and taken from ecdh
   ## generic parameter `KeyLength`
-  copyMem(output, x32, SkEcdhSecretSize)
+  copyMem(output, x32, KeyLength)
   return 1

 func ecdhSharedSecret(seckey: SkPrivateKey, pubkey: secp.SkPublicKey): SharedSecret =
   ## Compute ecdh agreed shared secret.
-  let res = ecdh[SkEcdhSecretSize](secp256k1.SkSecretKey(seckey),
-                                   secp256k1.SkPublicKey(pubkey),
-                                   ecdhSharedSecretHash, nil)
+  let res = secp256k1.ecdh[KeyLength](
+    secp256k1.SkSecretKey(seckey),
+    secp256k1.SkPublicKey(pubkey),
+    ecdhSharedSecretHash,
+    nil,
+  )
   # This function only fail if the hash function return zero.
   # Because our hash function always success, we can turn the error into defect
   doAssert res.isOk, $res.error
```
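For context on the hunk above: secp256k1's `ecdh` calls a user-supplied hash callback with the shared point's x and y coordinates, and the callback must fill `output` and return non-zero on success. A minimal sketch of that contract, assuming a `cint` return type and a 32-byte x coordinate (the exact signature comes from the secp256k1 wrapper, so treat this as illustrative):

```nim
# Sketch of the ecdh hash-callback contract: keep only the x coordinate as
# the raw shared secret, exactly what ecdhSharedSecretHash above does.
proc copyX(output: ptr byte, x32, y32: ptr byte, data: pointer): cint =
  const keyLen = 32              # assumed length of the point's x coordinate
  copyMem(output, x32, keyLen)   # write the secret into the caller's buffer
  return 1                       # non-zero signals success to secp256k1
```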
Session codec module:

```diff
@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -11,19 +11,21 @@
 ## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#sessions
 ##

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[hashes, net, options, sugar, tables],
   stew/endians2,
   bearssl/rand,
   chronicles,
-  stew/[results, byteutils],
+  stew/[byteutils],
   stint,
   libp2p/crypto/crypto as libp2p_crypto,
   libp2p/crypto/secp,
   libp2p/signed_envelope,
   metrics,
   nimcrypto,
+  results,
   "."/[messages, messages_encoding, node, spr, hkdf, sessions],
   "."/crypto
@@ -32,13 +34,16 @@ from stew/objects import checkedEnumAssign

 export crypto

-declareCounter discovery_session_lru_cache_hits, "Session LRU cache hits"
-declareCounter discovery_session_lru_cache_misses, "Session LRU cache misses"
-declareCounter discovery_session_decrypt_failures, "Session decrypt failures"
+declareCounter dht_session_lru_cache_hits, "Session LRU cache hits"
+declareCounter dht_session_lru_cache_misses, "Session LRU cache misses"
+declareCounter dht_session_decrypt_failures, "Session decrypt failures"

 logScope:
   topics = "discv5"

+type
+  cipher = aes128
+
 const
   version: uint16 = 1
   idSignatureText = "discovery v5 identity proof"
@@ -161,7 +166,7 @@ proc deriveKeys*(n1, n2: NodeId, priv: PrivateKey, pub: PublicKey,
   ok secrets

 proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] =
-  var ectx: GCM[aes128]
+  var ectx: GCM[cipher]
   ectx.init(key, nonce, authData)
   result = newSeq[byte](pt.len + gcmTagSize)
   ectx.encrypt(pt, result)
@@ -174,7 +179,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
     debug "cipher is missing tag", len = ct.len
     return

-  var dctx: GCM[aes128]
+  var dctx: GCM[cipher]
   dctx.init(key, nonce, authData)
   var res = newSeq[byte](ct.len - gcmTagSize)
   var tag: array[gcmTagSize, byte]
@@ -188,7 +193,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
       return some(res)

 proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] =
-  var ectx: CTR[aes128]
+  var ectx: CTR[cipher]
   ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv)
   result = newSeq[byte](header.len)
   ectx.encrypt(header, result)
```
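For readers unfamiliar with nimcrypto's GCM mode as used by `encryptGCM`/`decryptGCM` above, here is a minimal round-trip sketch. Sizes are assumed (16-byte key and tag, 12-byte nonce, as in discv5) and error handling is omitted; the real code appends the tag to the packet instead of asserting:

```nim
import nimcrypto

const gcmTagSize = 16
var
  key: array[16, byte]            # aes128 key (all zero here, for the sketch)
  nonce: array[12, byte]          # AES-GCM nonce
  aad = [byte 1, 2, 3]            # authenticated-but-not-encrypted data
  pt = [byte 10, 20, 30, 40]      # plaintext

var ectx: GCM[aes128]
ectx.init(key, nonce, aad)
var ct = newSeq[byte](pt.len)
ectx.encrypt(pt, ct)
var tag: array[gcmTagSize, byte]
ectx.getTag(tag)                  # authentication tag, sent with the packet
ectx.clear()

var dctx: GCM[aes128]
dctx.init(key, nonce, aad)
var decrypted = newSeq[byte](ct.len)
dctx.decrypt(ct, decrypted)
var checkTag: array[gcmTagSize, byte]
dctx.getTag(checkTag)             # must equal `tag`, else reject the packet
dctx.clear()
assert decrypted == @pt and checkTag == tag
```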
```diff
@@ -200,7 +205,7 @@ proc hasHandshake*(c: Codec, key: HandshakeKey): bool =
 proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
     seq[byte] =
   result.add(protocolId)
-  result.add(version.toBytesBE())
+  result.add(endians2.toBytesBE(version))
   result.add(byte(flag))
   result.add(nonce)
   # TODO: assert on authSize of > 2^16?
@@ -208,8 +213,9 @@ proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):

 proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
     toId: NodeId, toAddr: Address, message: openArray[byte]):
-    (seq[byte], AESGCMNonce) =
+    (seq[byte], AESGCMNonce, bool) =
   var nonce: AESGCMNonce
+  var haskey: bool
   hmacDrbgGenerate(rng, nonce) # Random AESGCM nonce
   var iv: array[ivSize, byte]
   hmacDrbgGenerate(rng, iv) # Random IV
@@ -225,10 +231,11 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,

   # message
   var messageEncrypted: seq[byte]
-  var initiatorKey, recipientKey: AesKey
-  if c.sessions.load(toId, toAddr, recipientKey, initiatorKey):
+  var initiatorKey, recipientKey1, recipientKey2: AesKey
+  if c.sessions.load(toId, toAddr, recipientKey1, recipientKey2, initiatorKey):
+    haskey = true
     messageEncrypted = encryptGCM(initiatorKey, nonce, message, @iv & header)
-    discovery_session_lru_cache_hits.inc()
+    dht_session_lru_cache_hits.inc()
   else:
     # We might not have the node's keys if the handshake hasn't been performed
     # yet. That's fine, we send a random-packet and we will be responded with
@@ -237,10 +244,11 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
     # message. 16 bytes for the gcm tag and 4 bytes for ping with requestId of
     # 1 byte (e.g "01c20101"). Could increase to 27 for 8 bytes requestId in
     # case this must not look like a random packet.
+    haskey = false
     var randomData: array[gcmTagSize + 4, byte]
     hmacDrbgGenerate(rng, randomData)
     messageEncrypted.add(randomData)
-    discovery_session_lru_cache_misses.inc()
+    dht_session_lru_cache_misses.inc()

   let maskedHeader = encryptHeader(toId, iv, header)
@@ -249,7 +257,7 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
   packet.add(maskedHeader)
   packet.add(messageEncrypted)

-  return (packet, nonce)
+  return (packet, nonce, haskey)

 proc encodeWhoareyouPacket*(rng: var HmacDrbgContext, c: var Codec,
     toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
@@ -370,7 +378,7 @@ proc decodeHeader*(id: NodeId, iv, maskedHeader: openArray[byte]):
     DecodeResult[(StaticHeader, seq[byte])] =
   # No need to check staticHeader size as that is included in minimum packet
   # size check in decodePacket
-  var ectx: CTR[aes128]
+  var ectx: CTR[cipher]
   ectx.init(id.toByteArrayBE().toOpenArray(0, aesKeySize - 1), iv)
   # Decrypt static-header part of the header
   var staticHeader = newSeq[byte](staticHeaderSize)
@@ -419,26 +427,35 @@ proc decodeMessagePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
   let srcId = NodeId.fromBytesBE(header.toOpenArray(staticHeaderSize,
     header.high))

-  var initiatorKey, recipientKey: AesKey
-  if not c.sessions.load(srcId, fromAddr, recipientKey, initiatorKey):
+  var initiatorKey, recipientKey1, recipientKey2: AesKey
+  if not c.sessions.load(srcId, fromAddr, recipientKey1, recipientKey2, initiatorKey):
     # Don't consider this an error, simply haven't done a handshake yet or
     # the session got removed.
     trace "Decrypting failed (no keys)"
-    discovery_session_lru_cache_misses.inc()
+    dht_session_lru_cache_misses.inc()
     return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
       srcId: srcId))

-  discovery_session_lru_cache_hits.inc()
+  dht_session_lru_cache_hits.inc()

-  let pt = decryptGCM(recipientKey, nonce, ct, @iv & @header)
-  if pt.isNone():
-    # Don't consider this an error, the session got probably removed at the
-    # peer's side and a random message is send.
-    trace "Decrypting failed (invalid keys)"
-    c.sessions.del(srcId, fromAddr)
-    discovery_session_decrypt_failures.inc()
-    return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
-      srcId: srcId))
+  var pt = decryptGCM(recipientKey2, nonce, ct, @iv & @header)
+  if pt.isNone():
+    trace "Decrypting failed, trying other key"
+    pt = decryptGCM(recipientKey1, nonce, ct, @iv & @header)
+    if pt.isNone():
+      # Don't consider this an error, the session got probably removed at the
+      # peer's side and a random message is send.
+      # This might also be a cross-connect. Not deleting key, as it might be
+      # needed later, depending on message order.
+      trace "Decrypting failed (invalid keys)", address = fromAddr
+      #c.sessions.del(srcId, fromAddr)
+      dht_session_decrypt_failures.inc()
+      return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
+        srcId: srcId))
+
+    # Most probably the same decryption key will work next time. We should
+    # elevate it's priority.
+    c.sessions.swapr(srcId, fromAddr)

   let message = ? decodeMessage(pt.get())
```
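The shape of the two-key fallback above, reduced to a self-contained sketch: try the currently favoured key first, fall back to the older one, and promote whichever worked. The `Session` type and the `tryDecrypt` stand-in are illustrative only; the real code keys sessions by `(NodeId, Address)` and decrypts with AES-GCM:

```nim
import std/options

type Session = object
  key1, key2: seq[byte]     # two candidate keys; key2 is the current favourite

# Stand-in for decryptGCM: "decrypts" only when the right key is supplied.
proc tryDecrypt(key, ct: seq[byte]): Option[seq[byte]] =
  if key == ct: some(ct) else: none(seq[byte])

proc decode(s: var Session, ct: seq[byte]): Option[seq[byte]] =
  var pt = tryDecrypt(s.key2, ct)   # favourite key first
  if pt.isNone:
    pt = tryDecrypt(s.key1, ct)     # fall back to the older key
    if pt.isSome:
      swap(s.key1, s.key2)          # promote the key that worked (cf. swapr)
  pt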
IP vote module:

```diff
@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -15,7 +15,7 @@
 ## To select the right address, a majority count is done. This is done over a
 ## sort of moving window as votes expire after `IpVoteTimeout`.

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[tables, options],
```
LRU cache module:

```diff
@@ -1,6 +1,6 @@
 import std/[tables, lists, options]

-{.push raises: [Defect].}
+{.push raises: [].}

 export tables, lists, options
@@ -55,3 +55,10 @@ iterator items*[K, V](lru: LRUCache[K, V]): V =
   for item in lru.list:
     yield item[1]
+
+iterator keys*[K, V](lru: LRUCache[K, V]): K =
+  ## Get cached keys - this doesn't touch the cache
+  ##
+
+  for item in lru.table.keys:
+    yield item
```
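A minimal sketch of the cache shape the `keys` iterator above is added to, assuming the usual table-plus-linked-list layout (the real `LRUCache` in this repository may differ in detail): lookups touch recency order, while iterating the table's keys does not, which is exactly why `keys` goes through `lru.table`:

```nim
import std/[tables, lists, options]

type
  LRUCache[K, V] = object
    capacity: int
    list: DoublyLinkedList[(K, V)]                     # front = most recent
    table: Table[K, DoublyLinkedListNode[(K, V)]]

proc get[K, V](lru: var LRUCache[K, V], key: K): Option[V] =
  ## Lookup moves the entry to the front (touches recency order).
  if key in lru.table:
    let node = lru.table[key]
    lru.list.remove(node)
    lru.list.prepend(node)
    return some(node.value[1])
  none(V)

proc put[K, V](lru: var LRUCache[K, V], key: K, value: V) =
  if key in lru.table:
    lru.list.remove(lru.table[key])                    # replace existing
  elif lru.table.len >= lru.capacity:
    let oldest = lru.list.tail                         # evict least recent
    lru.table.del(oldest.value[0])
    lru.list.remove(oldest)
  let node = newDoublyLinkedNode((key, value))
  lru.list.prepend(node)
  lru.table[key] = node

iterator keys[K, V](lru: LRUCache[K, V]): K =
  ## Iterating table keys leaves recency order untouched.
  for k in lru.table.keys:
    yield k
```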
Messages module:

```diff
@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -10,7 +10,7 @@
 ## These messages get protobuf encoded, while in the spec they get RLP encoded.
 ##

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[hashes, net],
```
Message encoding module:

```diff
@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2020-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -11,8 +11,10 @@
 import
   std/net,
+  chronicles,
+  stew/endians2,
   libp2p/routing_record,
   libp2p/signed_envelope,
   libp2p/protobuf/minprotobuf,
   "."/[messages, spr, node],
   ../../../../dht/providers_encoding
@@ -98,7 +100,7 @@ proc getField*(pb: ProtoBuffer, field: int,
   if not(res):
     ok(false)
   else:
-    family = uint8.fromBytesBE(buffer).IpAddressFamily
+    family = endians2.fromBytesBE(uint8, buffer).IpAddressFamily
     ok(true)

 proc write*(pb: var ProtoBuffer, field: int, family: IpAddressFamily) =
@@ -324,7 +326,7 @@ proc encodeMessage*[T: SomeMessage](p: T, reqId: RequestId): seq[byte] =
   pb.write(2, encoded)
   pb.finish()
   result.add(pb.buffer)
-  trace "Encoded protobuf message", typ = $T, encoded
+  trace "Encoded protobuf message", typ = $T

 proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] =
   ## Decodes to the specific `Message` type.
```
Node module:

```diff
@@ -1,40 +1,51 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
+{.push raises: [].}

 import
-  std/hashes,
+  std/[hashes, net],
   bearssl/rand,
   chronicles,
   chronos,
   nimcrypto,
-  stew/shims/net,
   stint,
   ./crypto,
   ./spr

 export stint

+const
+  avgSmoothingFactor = 0.9
+  seenSmoothingFactor = 0.9
+
 type
   NodeId* = UInt256

   Address* = object
-    ip*: ValidIpAddress
+    ip*: IpAddress
     port*: Port

+  Stats* = object
+    rttMin*: float #millisec
+    rttAvg*: float #millisec
+    bwAvg*: float #bps
+    bwMax*: float #bps
+
   Node* = ref object
     id*: NodeId
     pubkey*: PublicKey
     address*: Option[Address]
     record*: SignedPeerRecord
-    seen*: bool ## Indicates if there was at least one successful
-                ## request-response with this node, or if the nde was verified
-                ## through the underlying transport mechanisms.
+    seen*: float ## Indicates if there was at least one successful
+                 ## request-response with this node, or if the nde was verified
+                 ## through the underlying transport mechanisms. After first contact
+                 ## it tracks how reliable is the communication with the node.
+    stats*: Stats # traffic measurements and statistics

 func toNodeId*(pid: PeerId): NodeId =
   ## Convert public key to a node identifier.
@@ -57,7 +68,7 @@ func newNode*(
     id: ? pk.toNodeId(),
     pubkey: pk,
     record: record,
-    address: Address(ip: ValidIpAddress.init(ip), port: port).some)
+    address: Address(ip: ip, port: port).some)

   ok node
@@ -77,7 +88,9 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
     nodeId = ? pk.get().toNodeId()

   if r.ip.isSome() and r.udp.isSome():
-    let a = Address(ip: ipv4(r.ip.get()), port: Port(r.udp.get()))
+    let a = Address(
+      ip: IpAddress(family: IPv4, address_v4: r.ip.get()), port: Port(r.udp.get())
+    )

     ok(Node(
       id: nodeId,
@@ -91,7 +104,7 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
       record: r,
       address: none(Address)))

-proc update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress],
+proc update*(n: Node, pk: PrivateKey, ip: Option[IpAddress],
     tcpPort, udpPort: Option[Port] = none[Port]()): Result[void, cstring] =
   ? n.record.update(pk, ip, tcpPort, udpPort)
@@ -135,14 +148,14 @@ func shortLog*(id: NodeId): string =
     result = sid
   else:
     result = newStringOfCap(10)
-    for i in 0..<2:
+    for i in 0..<3:
       result.add(sid[i])
     result.add("*")
     for i in (len(sid) - 6)..sid.high:
       result.add(sid[i])
 chronicles.formatIt(NodeId): shortLog(it)

-func hash*(ip: ValidIpAddress): Hash =
+func hash*(ip: IpAddress): Hash =
   case ip.family
   of IpAddressFamily.IPv6: hash(ip.address_v6)
   of IpAddressFamily.IPv4: hash(ip.address_v4)
@@ -177,3 +190,38 @@ func shortLog*(nodes: seq[Node]): string =

   result.add("]")
 chronicles.formatIt(seq[Node]): shortLog(it)
+
+func shortLog*(address: Address): string =
+  $address
+
+chronicles.formatIt(Address): shortLog(it)
+
+func registerSeen*(n:Node, seen = true) =
+  ## Register event of seeing (getting message from) or not seeing (missing message) node
+  ## Note: interpretation might depend on NAT type
+  if n.seen == 0: # first time seeing the node
+    n.seen = 1
+  else:
+    n.seen = seenSmoothingFactor * n.seen + (1.0 - seenSmoothingFactor) * seen.float
+
+func alreadySeen*(n:Node) : bool =
+  ## Was the node seen at least once?
+  n.seen > 0
+
+# collecting performane metrics
+func registerRtt*(n: Node, rtt: Duration) =
+  ## register an RTT measurement
+  let rttMs = rtt.nanoseconds.float / 1e6
+  n.stats.rttMin =
+    if n.stats.rttMin == 0: rttMs
+    else: min(n.stats.rttMin, rttMs)
+  n.stats.rttAvg =
+    if n.stats.rttAvg == 0: rttMs
+    else: avgSmoothingFactor * n.stats.rttAvg + (1.0 - avgSmoothingFactor) * rttMs
+
+func registerBw*(n: Node, bw: float) =
+  ## register an bandwidth measurement
+  n.stats.bwMax = max(n.stats.bwMax, bw)
+  n.stats.bwAvg =
+    if n.stats.bwAvg == 0: bw
+    else: avgSmoothingFactor * n.stats.bwAvg + (1.0 - avgSmoothingFactor) * bw
```
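A worked example of the exponential smoothing introduced above, with factor 0.9 as in `seenSmoothingFactor` (plain floats instead of the `Node` object):

```nim
# new = 0.9 * old + 0.1 * sample
var seen = 1.0                  # first successful contact sets seen to 1
for sample in [1.0, 1.0, 0.0]:  # two replies, then one miss
  seen = 0.9 * seen + 0.1 * sample
echo seen                       # 0.9 after a single miss
```

Starting from a fully seen node, n consecutive misses leave `seen = 0.9^n`, so it takes about seven misses (0.9^7 ≈ 0.48) before the score drops below the `NoreplyRemoveThreshold = 0.5` defined later in the protocol module.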
Nodes verification module:

```diff
@@ -1,8 +1,8 @@
-{.push raises: [Defect].}
+{.push raises: [].}

 import
-  std/[sets, options],
-  stew/results, stew/shims/net, chronicles, chronos,
+  std/[net, sets, options],
+  results, chronicles, chronos,
   "."/[node, spr, routing_table]

 logScope:
```
Discovery protocol module:

```diff
@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -71,18 +71,18 @@
 ## more requests will be needed for a lookup (adding bandwidth and latency).
 ## This might be a concern for mobile devices.

-{.push raises: [Defect].}
+{.push raises: [].}

 import
-  std/[tables, sets, options, math, sequtils, algorithm, strutils],
-  stew/shims/net as stewNet,
+  std/[net, tables, sets, options, math, sequtils, algorithm, strutils],
+  json_serialization/std/net,
-  stew/[base64, endians2, results],
+  stew/[base64, endians2],
   pkg/[chronicles, chronicles/chronos_tools],
   pkg/chronos,
   pkg/stint,
   pkg/bearssl/rand,
-  pkg/metrics
+  pkg/metrics,
+  pkg/results

 import "."/[
   messages,
@@ -100,13 +100,13 @@ import nimcrypto except toHex

 export options, results, node, spr, providers

-declareCounter discovery_message_requests_outgoing,
+declareCounter dht_message_requests_outgoing,
   "Discovery protocol outgoing message requests", labels = ["response"]
-declareCounter discovery_message_requests_incoming,
+declareCounter dht_message_requests_incoming,
   "Discovery protocol incoming message requests", labels = ["response"]
-declareCounter discovery_unsolicited_messages,
+declareCounter dht_unsolicited_messages,
   "Discovery protocol unsolicited or timed-out messages"
-declareCounter discovery_enr_auto_update,
+declareCounter dht_enr_auto_update,
   "Amount of discovery IP:port address SPR auto updates"

 logScope:
```
```diff
@@ -117,6 +117,7 @@ const
   LookupRequestLimit = 3 ## Amount of distances requested in a single Findnode
   ## message for a lookup or query
   FindNodeResultLimit = 16 ## Maximum amount of SPRs in the total Nodes messages
+  FindNodeFastResultLimit = 6 ## Maximum amount of SPRs in response to findNodeFast
   ## that will be processed
   MaxNodesPerMessage = 3 ## Maximum amount of SPRs per individual Nodes message
   RefreshInterval = 5.minutes ## Interval of launching a random query to
@@ -125,12 +126,17 @@
   RevalidateMax = 10000 ## Revalidation of a peer is done between min and max milliseconds.
   ## value in milliseconds
   IpMajorityInterval = 5.minutes ## Interval for checking the latest IP:Port
+  DebugPrintInterval = 5.minutes ## Interval to print neighborhood with stats
   ## majority and updating this when SPR auto update is set.
   InitialLookups = 1 ## Amount of lookups done when populating the routing table
-  ResponseTimeout* = 4.seconds ## timeout for the response of a request-response
+  ResponseTimeout* = 1.seconds ## timeout for the response of a request-response
   ## call
   MaxProvidersEntries* = 1_000_000 # one million records
   MaxProvidersPerEntry* = 20 # providers per entry
+  FindnodeSeenThreshold = 1.0 ## threshold used as findnode response filter
+  LookupSeenThreshold = 0.0 ## threshold used for lookup nodeset selection
+  QuerySeenThreshold = 0.0 ## threshold used for query nodeset selection
+  NoreplyRemoveThreshold = 0.5 ## remove node on no reply if 'seen' is below this value

 func shortLog*(record: SignedPeerRecord): string =
   ## Returns compact string representation of ``SignedPeerRecord``.
```
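Reading the new thresholds together with the float `seen` score from the node module: `FindnodeSeenThreshold = 1.0` keeps only nodes whose smoothed score is at its maximum, while a threshold of `0.0` accepts every candidate. A sketch of the kind of filter the `seenThreshold` parameters below imply (the real routing-table selection differs in detail):

```nim
type N = object
  seen: float

proc passes(n: N, seenThreshold: float): bool =
  # 0.0 passes everything; 1.0 passes only "perfectly seen" nodes
  n.seen >= seenThreshold

let candidates = @[N(seen: 1.0), N(seen: 0.6), N(seen: 0.0)]
var selected: seq[N]
for n in candidates:
  if passes(n, 1.0):   # FindnodeSeenThreshold
    selected.add(n)
echo selected.len      # 1
```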
```diff
@@ -166,6 +172,7 @@ type
     refreshLoop: Future[void]
     revalidateLoop: Future[void]
     ipMajorityLoop: Future[void]
+    debugPrintLoop: Future[void]
     lastLookup: chronos.Moment
     bootstrapRecords*: seq[SignedPeerRecord]
     ipVote: IpVote
@@ -182,6 +189,9 @@ type

   DiscResult*[T] = Result[T, cstring]

+func `$`*(p: Protocol): string =
+  $p.localNode.id
+
 const
   defaultDiscoveryConfig* = DiscoveryConfig(
     tableIpLimits: DefaultTableIpLimits,
@@ -231,7 +241,7 @@ proc randomNodes*(d: Protocol, maxAmount: int): seq[Node] =
   d.routingTable.randomNodes(maxAmount)

 proc randomNodes*(d: Protocol, maxAmount: int,
-    pred: proc(x: Node): bool {.gcsafe, noSideEffect.}): seq[Node] =
+    pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].}): seq[Node] =
   ## Get a `maxAmount` of random nodes from the local routing table with the
   ## `pred` predicate function applied as filter on the nodes selected.
   d.routingTable.randomNodes(maxAmount, pred)
@@ -243,14 +253,14 @@ proc randomNodes*(d: Protocol, maxAmount: int,
   d.randomNodes(maxAmount, proc(x: Node): bool = x.record.contains(enrField))

 proc neighbours*(d: Protocol, id: NodeId, k: int = BUCKET_SIZE,
-    seenOnly = false): seq[Node] =
+    seenThreshold = 0.0): seq[Node] =
   ## Return up to k neighbours (closest node ids) of the given node id.
-  d.routingTable.neighbours(id, k, seenOnly)
+  d.routingTable.neighbours(id, k, seenThreshold)

 proc neighboursAtDistances*(d: Protocol, distances: seq[uint16],
-    k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
+    k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
   ## Return up to k neighbours (closest node ids) at given distances.
-  d.routingTable.neighboursAtDistances(distances, k, seenOnly)
+  d.routingTable.neighboursAtDistances(distances, k, seenThreshold)

 proc nodesDiscovered*(d: Protocol): int = d.routingTable.len
```
```diff
@@ -272,7 +282,7 @@ proc updateRecord*(
     newSpr = spr.get()
     seqNo = d.localNode.record.seqNum

-  info "Updated discovery SPR", uri = newSpr.toURI()
+  info "Updated discovery SPR", uri = newSpr.toURI(), newSpr = newSpr.data

   d.localNode.record = newSpr
   d.localNode.record.data.seqNo = seqNo
@@ -338,7 +348,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
   # TODO: Still deduplicate also?
   if fn.distances.all(proc (x: uint16): bool = return x <= 256):
     d.sendNodes(fromId, fromAddr, reqId,
-      d.routingTable.neighboursAtDistances(fn.distances, seenOnly = true))
+      d.routingTable.neighboursAtDistances(fn.distances, FindNodeResultLimit, FindnodeSeenThreshold))
   else:
     # At least one invalid distance, but the polite node we are, still respond
     # with empty nodes.
@@ -347,7 +357,7 @@
 proc handleFindNodeFast(d: Protocol, fromId: NodeId, fromAddr: Address,
     fnf: FindNodeFastMessage, reqId: RequestId) =
   d.sendNodes(fromId, fromAddr, reqId,
-    d.routingTable.neighbours(fnf.target, seenOnly = true))
+    d.routingTable.neighbours(fnf.target, FindNodeFastResultLimit, FindnodeSeenThreshold))
+  # TODO: if known, maybe we should add exact target even if not yet "seen"

 proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
@@ -369,7 +379,7 @@ proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,

 proc addProviderLocal(p: Protocol, cId: NodeId, prov: SignedPeerRecord) {.async.} =
   trace "adding provider to local db", n = p.localNode, cId, prov
-  if (let res = (await p.providers.add(cid, prov)); res.isErr):
+  if (let res = (await p.providers.add(cId, prov)); res.isErr):
     trace "Unable to add provider", cid, peerId = prov.data.peerId

 proc handleAddProvider(
@@ -403,27 +413,27 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
     message: Message) =
   case message.kind
   of ping:
-    discovery_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc()
     d.handlePing(srcId, fromAddr, message.ping, message.reqId)
   of findNode:
-    discovery_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc()
     d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId)
   of findNodeFast:
-    discovery_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc()
     d.handleFindNodeFast(srcId, fromAddr, message.findNodeFast, message.reqId)
   of talkReq:
-    discovery_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc()
     d.handleTalkReq(srcId, fromAddr, message.talkReq, message.reqId)
   of addProvider:
-    discovery_message_requests_incoming.inc()
-    discovery_message_requests_incoming.inc(labelValues = ["no_response"])
+    dht_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc(labelValues = ["no_response"])
     d.handleAddProvider(srcId, fromAddr, message.addProvider, message.reqId)
   of getProviders:
-    discovery_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc()
     asyncSpawn d.handleGetProviders(srcId, fromAddr, message.getProviders, message.reqId)
   of regTopic, topicQuery:
-    discovery_message_requests_incoming.inc()
-    discovery_message_requests_incoming.inc(labelValues = ["no_response"])
+    dht_message_requests_incoming.inc()
+    dht_message_requests_incoming.inc(labelValues = ["no_response"])
     trace "Received unimplemented message kind", kind = message.kind,
       origin = fromAddr
   else:
@@ -431,7 +441,7 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
     if d.awaitedMessages.take((srcId, message.reqId), waiter):
       waiter.complete(some(message))
     else:
-      discovery_unsolicited_messages.inc()
+      dht_unsolicited_messages.inc()
       trace "Timed out or unrequested message", kind = message.kind,
         origin = fromAddr
```
```diff
@@ -443,27 +453,50 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],
   else:
     ok()

-proc replaceNode(d: Protocol, n: Node) =
+proc replaceNode(d: Protocol, n: Node, forceRemoveBelow = 1.0) =
   if n.record notin d.bootstrapRecords:
-    d.routingTable.replaceNode(n)
+    d.routingTable.replaceNode(n, forceRemoveBelow)
   else:
     # For now we never remove bootstrap nodes. It might make sense to actually
     # do so and to retry them only in case we drop to a really low amount of
     # peers in the routing table.
     debug "Message request to bootstrap node failed", src=d.localNode, dst=n

+proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T,
+    reqId: RequestId) =
+  doAssert(toNode.address.isSome())
+  let
+    message = encodeMessage(m, reqId)
+
+  trace "Send message packet", dstId = toNode.id,
+    address = toNode.address, kind = messageKind(T)
+  dht_message_requests_outgoing.inc()
+
+  d.transport.sendMessage(toNode, message)
+
+proc waitResponse*[T: SomeMessage](d: Protocol, node: Node, msg: T):
+    Future[Option[Message]] =
+  let reqId = RequestId.init(d.rng[])
+  result = d.waitMessage(node, reqId)
+  sendRequest(d, node, msg, reqId)
+
-proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId):
+proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId, timeout = ResponseTimeout):
     Future[Option[Message]] =
   result = newFuture[Option[Message]]("waitMessage")
   let res = result
   let key = (fromNode.id, reqId)
-  sleepAsync(ResponseTimeout).addCallback() do(data: pointer):
+  sleepAsync(timeout).addCallback() do(data: pointer):
     d.awaitedMessages.del(key)
     if not res.finished:
       res.complete(none(Message))
   d.awaitedMessages[key] = result

+proc waitNodeResponses*[T: SomeMessage](d: Protocol, node: Node, msg: T):
+    Future[DiscResult[seq[SignedPeerRecord]]] =
+  let reqId = RequestId.init(d.rng[])
+  result = d.waitNodes(node, reqId)
+  sendRequest(d, node, msg, reqId)
```
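The timeout pattern in `waitMessage` above, reduced to a self-contained chronos sketch: register the awaited future before sending, then arm a timer that resolves it with `none` if nothing completed it first (simplified, with a string payload instead of `Message` and no request-key bookkeeping):

```nim
import std/options
import pkg/chronos

proc waitReply(timeout: Duration): Future[Option[string]] =
  let fut = newFuture[Option[string]]("waitReply")
  sleepAsync(timeout).addCallback() do(data: pointer):
    if not fut.finished:
      fut.complete(none(string))  # timed out: resolve with "no message"
  fut

when isMainModule:
  let reply = waitFor waitReply(100.milliseconds)
  echo reply.isSome               # false: nothing answered in time
```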
```diff
 proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
     Future[DiscResult[seq[SignedPeerRecord]]] {.async.} =
   ## Wait for one or more nodes replies.
@@ -472,72 +505,70 @@ proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
   ## on that, more replies will be awaited.
   ## If one reply is lost here (timed out), others are ignored too.
   ## Same counts for out of order receival.
+  let startTime = Moment.now()
   var op = await d.waitMessage(fromNode, reqId)
   if op.isSome:
     if op.get.kind == MessageKind.nodes:
       var res = op.get.nodes.sprs
-      let total = op.get.nodes.total
+      let
+        total = op.get.nodes.total
+        firstTime = Moment.now()
+        rtt = firstTime - startTime
+      # trace "nodes RTT:", rtt, node = fromNode
+      fromNode.registerRtt(rtt)
       for i in 1 ..< total:
         op = await d.waitMessage(fromNode, reqId)
         if op.isSome and op.get.kind == MessageKind.nodes:
           res.add(op.get.nodes.sprs)
+          # Estimate bandwidth based on UDP packet train received, assuming these were
+          # released fast and spaced in time by bandwidth bottleneck. This is just a rough
+          # packet-pair based estimate, far from being perfect.
+          # TODO: get message size from lower layer for better bandwidth estimate
+          # TODO: get better reception timestamp from lower layers
+          let
+            deltaT = Moment.now() - firstTime
+            bwBps = 500.0 * 8.0 / (deltaT.nanoseconds.float / i.float / 1e9)
+          # trace "bw estimate:", deltaT = deltaT, i, bw_mbps = bwBps / 1e6, node = fromNode
+          fromNode.registerBw(bwBps)
         else:
           # No error on this as we received some nodes.
           break
       return ok(res)
     else:
-      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
      return err("Invalid response to find node message")
   else:
-    discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+    dht_message_requests_outgoing.inc(labelValues = ["no_response"])
     return err("Nodes message not received in time")
```
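A worked instance of the packet-pair estimate above: with the assumed roughly 500-byte messages, `i` extra packets received over `deltaT` give `bw = 500 * 8 / (deltaT / i)` bits per second. For example, three packets spaced one millisecond apart:

```nim
# deltaT = 3 ms total for i = 3 extra packets -> per-packet gap of 1 ms
let
  deltaTns = 3_000_000.0                     # 3 ms in nanoseconds
  i = 3.0
  bwBps = 500.0 * 8.0 / (deltaTns / i / 1e9) # same formula as in waitNodes
echo bwBps / 1e6                             # 4.0 -> about 4 Mbps
```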
```diff
-proc sendRequest*[T: SomeMessage](d: Protocol, toId: NodeId, toAddr: Address, m: T):
-    RequestId =
-  let
-    reqId = RequestId.init(d.rng[])
-    message = encodeMessage(m, reqId)
-
-  trace "Send message packet", dstId = toId, toAddr, kind = messageKind(T)
-  discovery_message_requests_outgoing.inc()
-
-  d.transport.sendMessage(toId, toAddr, message)
-  return reqId
-
-proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T):
-    RequestId =
-  doAssert(toNode.address.isSome())
-  let
-    reqId = RequestId.init(d.rng[])
-    message = encodeMessage(m, reqId)
-
-  trace "Send message packet", dstId = toNode.id,
-    address = toNode.address, kind = messageKind(T)
-  discovery_message_requests_outgoing.inc()
-
-  d.transport.sendMessage(toNode, message)
-  return reqId
-
 proc ping*(d: Protocol, toNode: Node):
     Future[DiscResult[PongMessage]] {.async.} =
   ## Send a discovery ping message.
   ##
   ## Returns the received pong message or an error.
-  let reqId = d.sendRequest(toNode,
-    PingMessage(sprSeq: d.localNode.record.seqNum))
-  let resp = await d.waitMessage(toNode, reqId)
+  let
+    msg = PingMessage(sprSeq: d.localNode.record.seqNum)
+    startTime = Moment.now()
+    resp = await d.waitResponse(toNode, msg)
+    rtt = Moment.now() - startTime
+  # trace "ping RTT:", rtt, node = toNode
+  toNode.registerRtt(rtt)

+  d.routingTable.setJustSeen(toNode, resp.isSome())
   if resp.isSome():
     if resp.get().kind == pong:
-      d.routingTable.setJustSeen(toNode)
       return ok(resp.get().pong)
     else:
       d.replaceNode(toNode)
-      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
       return err("Invalid response to ping message")
   else:
-    d.replaceNode(toNode)
-    discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+    # A ping (or the pong) was lost, what should we do? Previous implementation called
+    # d.replaceNode(toNode) immediately, which removed the node. This is too aggressive,
+    # especially if we have a temporary network outage. Although bootstrap nodes are protected
+    # from being removed, everything else would slowly be removed.
+    d.replaceNode(toNode, NoreplyRemoveThreshold)
+    dht_message_requests_outgoing.inc(labelValues = ["no_response"])
     return err("Pong message not received in time")
```
```diff
 proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
@@ -546,12 +577,13 @@ proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
   ##
   ## Returns the received nodes or an error.
   ## Received SPRs are already validated and converted to `Node`.
-  let reqId = d.sendRequest(toNode, FindNodeMessage(distances: distances))
-  let nodes = await d.waitNodes(toNode, reqId)
+  let
+    msg = FindNodeMessage(distances: distances)
+    nodes = await d.waitNodeResponses(toNode, msg)

+  d.routingTable.setJustSeen(toNode, nodes.isOk)
   if nodes.isOk:
     let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit, distances)
-    d.routingTable.setJustSeen(toNode)
     return ok(res)
   else:
     trace "findNode nodes not OK."
@@ -564,12 +596,13 @@ proc findNodeFast*(d: Protocol, toNode: Node, target: NodeId):
   ##
   ## Returns the received nodes or an error.
   ## Received SPRs are already validated and converted to `Node`.
-  let reqId = d.sendRequest(toNode, FindNodeFastMessage(target: target))
-  let nodes = await d.waitNodes(toNode, reqId)
+  let
+    msg = FindNodeFastMessage(target: target)
+    nodes = await d.waitNodeResponses(toNode, msg)

+  d.routingTable.setJustSeen(toNode, nodes.isOk)
   if nodes.isOk:
-    let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit)
-    d.routingTable.setJustSeen(toNode)
+    let res = verifyNodesRecords(nodes.get(), toNode, FindNodeFastResultLimit)
     return ok(res)
   else:
     d.replaceNode(toNode)
@@ -581,21 +614,26 @@ proc talkReq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
   ## Send a discovery talkreq message.
   ##
   ## Returns the received talkresp message or an error.
-  let reqId = d.sendRequest(toNode,
-    TalkReqMessage(protocol: protocol, request: request))
-  let resp = await d.waitMessage(toNode, reqId)
+  let
+    msg = TalkReqMessage(protocol: protocol, request: request)
+    startTime = Moment.now()
+    resp = await d.waitResponse(toNode, msg)
+    rtt = Moment.now() - startTime
+  # trace "talk RTT:", rtt, node = toNode
+  toNode.registerRtt(rtt)

+  d.routingTable.setJustSeen(toNode, resp.isSome())
   if resp.isSome():
     if resp.get().kind == talkResp:
-      d.routingTable.setJustSeen(toNode)
       return ok(resp.get().talkResp.response)
     else:
       d.replaceNode(toNode)
-      discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+      dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
       return err("Invalid response to talk request message")
   else:
-    d.replaceNode(toNode)
-    discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+    # remove on loss only if there is a replacement
+    d.replaceNode(toNode, NoreplyRemoveThreshold)
+    dht_message_requests_outgoing.inc(labelValues = ["no_response"])
     return err("Talk response message not received in time")
```
proc lookupDistances*(target, dest: NodeId): seq[uint16] =
|
||||
@ -610,25 +648,18 @@ proc lookupDistances*(target, dest: NodeId): seq[uint16] =
|
||||
result.add(td - uint16(i))
|
||||
inc i
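# --- Worked example (not part of the changeset), assuming the usual discv5
# shape of lookupDistances: start from the log-distance td between target and
# dest, then add td+i and td-i alternately until the request limit is reached.
# With td = 240 and a limit of 3 distances this would yield:
#   lookupDistances(target, dest) == @[240'u16, 241, 239]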

proc lookupWorker(d: Protocol, destNode: Node, target: NodeId):
proc lookupWorker(d: Protocol, destNode: Node, target: NodeId, fast: bool):
Future[seq[Node]] {.async.} =
let dists = lookupDistances(target, destNode.id)

# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let r = await d.findNode(destNode, dists)
if r.isOk:
result.add(r[])
let r =
if fast:
await d.findNodeFast(destNode, target)
else:
# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let dists = lookupDistances(target, destNode.id)
await d.findNode(destNode, dists)

# Attempt to add all nodes discovered
for n in result:
discard d.addNode(n)

proc lookupWorkerFast(d: Protocol, destNode: Node, target: NodeId):
Future[seq[Node]] {.async.} =
## use target NodeId based find_node

let r = await d.findNodeFast(destNode, target)
if r.isOk:
result.add(r[])

@@ -642,7 +673,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
# `closestNodes` holds the k closest nodes to target found, sorted by distance
# Unvalidated nodes are used for requests as a form of validation.
var closestNodes = d.routingTable.neighbours(target, BUCKET_SIZE,
seenOnly = false)
LookupSeenThreshold)

var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@@ -659,10 +690,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
while i < closestNodes.len and pendingQueries.len < Alpha:
let n = closestNodes[i]
if not asked.containsOrIncl(n.id):
if fast:
pendingQueries.add(d.lookupWorkerFast(n, target))
else:
pendingQueries.add(d.lookupWorker(n, target))
pendingQueries.add(d.lookupWorker(n, target, fast))
inc i

trace "discv5 pending queries", total = pendingQueries.len
@@ -707,7 +735,8 @@ proc addProvider*(
res.add(d.localNode)
for toNode in res:
if toNode != d.localNode:
discard d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr))
let reqId = RequestId.init(d.rng[])
d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr), reqId)
else:
asyncSpawn d.addProviderLocal(cId, pr)

@@ -720,22 +749,21 @@ proc sendGetProviders(d: Protocol, toNode: Node,
trace "sendGetProviders", toNode, msg

let
reqId = d.sendRequest(toNode, msg)
resp = await d.waitMessage(toNode, reqId)
resp = await d.waitResponse(toNode, msg)

d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == MessageKind.providers:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().provs)
else:
# TODO: do we need to do something when there is an invalid response?
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to GetProviders message")
else:
# TODO: do we need to do something when there is no response?
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
# remove on loss only if there is a replacement
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("GetProviders response message not received in time")

proc getProvidersLocal*(
@@ -808,7 +836,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
## This will take k nodes from the routing table closest to target and
## query them for nodes closest to target. If there are less than k nodes in
## the routing table, nodes returned by the first queries will be used.
var queryBuffer = d.routingTable.neighbours(target, k, seenOnly = false)
var queryBuffer = d.routingTable.neighbours(target, k, QuerySeenThreshold)

var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@@ -823,7 +851,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
while i < min(queryBuffer.len, k) and pendingQueries.len < Alpha:
let n = queryBuffer[i]
if not asked.containsOrIncl(n.id):
pendingQueries.add(d.lookupWorker(n, target))
pendingQueries.add(d.lookupWorker(n, target, false))
inc i

trace "discv5 pending queries", total = pendingQueries.len
@@ -934,7 +962,8 @@ proc revalidateNode*(d: Protocol, n: Node) {.async.} =
discard d.addNode(nodes[][0])

# Get IP and port from pong message and add it to the ip votes
let a = Address(ip: ValidIpAddress.init(res.ip), port: Port(res.port))
trace "pong rx", n, myip = res.ip, myport = res.port
let a = Address(ip: res.ip, port: Port(res.port))
d.ipVote.insert(n.id, a)

proc revalidateLoop(d: Protocol) {.async.} =
@@ -1004,7 +1033,7 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
warn "Failed updating SPR with newly discovered external address",
majority, previous, error = res.error
else:
discovery_enr_auto_update.inc()
dht_enr_auto_update.inc()
info "Updated SPR with newly discovered external address",
majority, previous, uri = toURI(d.localNode.record)
else:
@@ -1019,6 +1048,19 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
trace "ipMajorityLoop canceled"
trace "ipMajorityLoop exited!"

proc debugPrintLoop(d: Protocol) {.async.} =
## Loop which prints the neighborhood with stats
while true:
await sleepAsync(DebugPrintInterval)
for b in d.routingTable.buckets:
debug "bucket", depth = b.getDepth,
len = b.nodes.len, standby = b.replacementLen
for n in b.nodes:
debug "node", n, rttMin = n.stats.rttMin.int, rttAvg = n.stats.rttAvg.int,
reliability = n.seen.round(3)
# bandwidth estimates are based on limited information, so not logging it yet to avoid confusion
# trace "node", n, bwMaxMbps = (n.stats.bwMax / 1e6).round(3), bwAvgMbps = (n.stats.bwAvg / 1e6).round(3)

func init*(
T: type DiscoveryConfig,
tableIpLimit: uint,
@@ -1034,7 +1076,7 @@ func init*(

proc newProtocol*(
privKey: PrivateKey,
enrIp: Option[ValidIpAddress],
enrIp: Option[IpAddress],
enrTcpPort, enrUdpPort: Option[Port],
localEnrFields: openArray[(string, seq[byte])] = [],
bootstrapRecords: openArray[SignedPeerRecord] = [],
@@ -1156,6 +1198,7 @@ proc start*(d: Protocol) {.async.} =
d.refreshLoop = refreshLoop(d)
d.revalidateLoop = revalidateLoop(d)
d.ipMajorityLoop = ipMajorityLoop(d)
d.debugPrintLoop = debugPrintLoop(d)

await d.providers.start()


@@ -1,16 +1,17 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.push raises: [].}

import std/sequtils

import pkg/chronicles
import pkg/libp2p
import pkg/questionable

import ../node
import ../lru
@@ -35,22 +36,21 @@ type
func add*(
self: var ProvidersCache,
id: NodeId,
provider: SignedPeerRecord) =
record: SignedPeerRecord) =
## Add providers for an id
## to the cache

if self.disable:
return

var providers =
if id notin self.cache:
Providers.init(self.maxProviders.int)
else:
self.cache.get(id).get()
without var providers =? self.cache.get(id):
providers = Providers.init(self.maxProviders.int)

let
peerId = provider.data.peerId
peerId = record.data.peerId

trace "Adding provider to cache", id, peerId
providers.put(peerId, provider)
trace "Adding provider record to cache", id, peerId
providers.put(peerId, record)
self.cache.put(id, providers)

proc get*(
@@ -58,14 +58,13 @@ proc get*(
id: NodeId,
start = 0,
stop = MaxProvidersPerEntry.int): seq[SignedPeerRecord] =
## Get providers for an id
## from the cache

if self.disable:
return

if id in self.cache:
let
recs = self.cache.get(id).get

if recs =? self.cache.get(id):
let
providers = toSeq(recs)[start..<min(recs.len, stop)]

@@ -74,23 +73,40 @@ proc get*(

func remove*(
self: var ProvidersCache,
id: NodeId,
peerId: PeerId) =
## Remove a provider record from an id
## from the cache
##

if self.disable:
return

if id notin self.cache:
for id in self.cache.keys:
if var providers =? self.cache.get(id):
trace "Removing provider from cache", id, peerId
providers.del(peerId)
self.cache.put(id, providers)

func remove*(
self: var ProvidersCache,
id: NodeId,
peerId: PeerId) =
## Remove a provider record from an id
## from the cache
##

if self.disable:
return

var
providers = self.cache.get(id).get()

trace "Removing provider from cache", id
providers.del(peerId)
self.cache.put(id, providers)
if var providers =? self.cache.get(id):
trace "Removing record from cache", id
providers.del(peerId)
self.cache.put(id, providers)

func drop*(self: var ProvidersCache, id: NodeId) =
## Drop all the providers for an entry
##

if self.disable:
return


@@ -1,11 +1,11 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.push raises: [].}

import std/sequtils
import std/strutils

@@ -1,15 +1,17 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.push raises: [].}

import std/options
import std/sequtils
from std/times import now, utc, toTime, toUnix

import pkg/stew/endians2
import pkg/chronos
import pkg/libp2p
import pkg/datastore
@@ -21,16 +23,13 @@ import ./common

const
ExpiredCleanupBatch* = 1000
CleanupInterval* = 5.minutes
CleanupInterval* = 24.hours

proc cleanupExpired*(
store: Datastore,
batchSize = ExpiredCleanupBatch) {.async.} =
trace "Cleaning up expired records"

let
now = Moment.now()

let
q = Query.init(CidKey, limit = batchSize)

@@ -47,11 +46,13 @@ proc cleanupExpired*(
var
keys = newSeq[Key]()

let
now = times.now().utc().toTime().toUnix()

for item in iter:
if pair =? (await item) and pair.key.isSome:
if (maybeKey, data) =? (await item) and key =? maybeKey:
let
(key, data) = (pair.key.get(), pair.data)
expired = Moment.init(uint64.fromBytesBE(data).int64, Microsecond)
expired = endians2.fromBytesBE(uint64, data).int64

if now >= expired:
trace "Found expired record", key
@@ -74,7 +75,7 @@ proc cleanupOrphaned*(
trace "Cleaning up orphaned records"

let
providersQuery = Query.init(ProvidersKey, limit = batchSize)
providersQuery = Query.init(ProvidersKey, limit = batchSize, value = false)

block:
without iter =? (await store.query(providersQuery)), err:
@@ -83,7 +84,7 @@

defer:
if not isNil(iter):
trace "Cleaning up query iterator"
trace "Cleaning up orphaned query iterator"
discard (await iter.dispose())

var count = 0
@@ -92,10 +93,7 @@
trace "Batch cleaned up", size = batchSize

count.inc
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()

if (maybeKey, _) =? (await item) and key =? maybeKey:
without peerId =? key.fromProvKey(), err:
trace "Error extracting parts from cid key", key
continue
@@ -104,15 +102,17 @@
trace "Error building cid key", err = err.msg
continue

without cidIter =? (await store.query(Query.init(cidKey, limit = 1))), err:
trace "Error querying key", cidKey
without cidIter =? (await store.query(Query.init(cidKey, limit = 1, value = false))), err:
trace "Error querying key", cidKey, err = err.msg
continue

let
res = (await allFinished(toSeq(cidIter)))
.filterIt( it.completed )
.mapIt( it.read.get )
.filterIt( it.key.isSome ).len
res = block:
var count = 0
for item in cidIter:
if (key, _) =? (await item) and key.isSome:
count.inc
count

if not isNil(cidIter):
trace "Disposing cid iter"

@@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -7,17 +7,18 @@

import std/sequtils
import std/strutils
from std/times import now, utc, toTime, toUnix

import pkg/stew/endians2
import pkg/datastore
import pkg/chronos
import pkg/libp2p
import pkg/chronicles
import pkg/stew/results as rs
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results

{.push raises: [Defect].}
{.push raises: [].}

import ./maintenance
import ./cache
@@ -56,30 +57,30 @@ proc getProvByKey*(self: ProvidersManager, key: Key): Future[?!SignedPeerRecord]

proc add*(
self: ProvidersManager,
cid: NodeId,
id: NodeId,
provider: SignedPeerRecord,
ttl = ZeroDuration): Future[?!void] {.async.} =

let
peerId = provider.data.peerId

trace "Adding provider to persistent store", cid, peerId
trace "Adding provider to persistent store", id, peerId
without provKey =? makeProviderKey(peerId), err:
trace "Error creating key from provider record", err = err.msg
return failure err.msg

without cidKey =? makeCidKey(cid, peerId), err:
without cidKey =? makeCidKey(id, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg

let
now = times.now().utc().toTime().toUnix()
expires =
if ttl > ZeroDuration:
ttl
ttl.seconds + now
else:
Moment.fromNow(self.ttl) - ZeroMoment

ttl = expires.microseconds.uint64.toBytesBE
self.ttl.seconds + now
ttl = endians2.toBytesBE(expires.uint64)

bytes: seq[byte] =
if existing =? (await self.getProvByKey(provKey)) and
@@ -93,17 +94,17 @@ proc add*(
bytes

if bytes.len > 0:
trace "Adding or updating provider record", cid, peerId
trace "Adding or updating provider record", id, peerId
if err =? (await self.store.put(provKey, bytes)).errorOption:
trace "Unable to store provider with key", key = provKey, err = err.msg

trace "Adding or updating cid", cid, key = cidKey, ttl = expires.minutes
trace "Adding or updating id", id, key = cidKey, ttl = expires.seconds
if err =? (await self.store.put(cidKey, @ttl)).errorOption:
trace "Unable to store provider with key", key = cidKey, err = err.msg
return

self.cache.add(cid, provider)
trace "Provider for cid added", cidKey, provKey
self.cache.add(id, provider)
trace "Provider for id added", cidKey, provKey
return success()

proc get*(
@@ -136,12 +137,10 @@ proc get*(
trace "Cleaning up query iterator"
discard (await cidIter.dispose())

var keys: seq[Key]
for item in cidIter:
# TODO: =? doesn't support tuples
if pair =? (await item) and pair.key.isSome:
let
(key, val) = (pair.key.get, pair.data)

if (maybeKey, val) =? (await item) and key =? maybeKey:
without pairs =? key.fromCidKey() and
provKey =? makeProviderKey(pairs.peerId), err:
trace "Error creating key from provider record", err = err.msg
@@ -150,17 +149,24 @@ proc get*(
trace "Querying provider key", key = provKey
without data =? (await self.store.get(provKey)):
trace "Error getting provider", key = provKey
keys.add(key)
continue

without provider =? SignedPeerRecord.decode(data).mapErr(mapFailure), err:
trace "Unable to decode provider from store", err = err.msg
keys.add(key)
continue

trace "Retrieved provider with key", key = provKey
providers.add(provider)
self.cache.add(id, provider)

trace "Retrieved providers from persistent store", cid = id, len = providers.len
trace "Deleting keys without provider from store", len = keys.len
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting records from persistent store", err = err.msg
return failure err

trace "Retrieved providers from persistent store", id = id, len = providers.len
return success providers

proc contains*(
@@ -178,8 +184,8 @@ proc contains*(self: ProvidersManager, peerId: PeerId): Future[bool] {.async.} =

return (await self.store.has(provKey)) |? false

proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $cid), err:
proc contains*(self: ProvidersManager, id: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $id), err:
return false

let
@@ -196,15 +202,15 @@ proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
discard (await iter.dispose())

for item in iter:
if pair =? (await item) and pair.key.isSome:
if (key, _) =? (await item) and key.isSome:
return true

return false

proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =

self.cache.drop(cid)
without cidKey =? (CidKey / $cid), err:
self.cache.drop(id)
without cidKey =? (CidKey / $id), err:
return failure(err.msg)

let
@@ -224,16 +230,14 @@ proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
keys: seq[Key]

for item in iter:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
if (maybeKey, _) =? (await item) and key =? maybeKey:

keys.add(key)
without pairs =? key.fromCidKey, err:
trace "Unable to parse peer id from key", key
return failure err

self.cache.remove(cid, pairs.peerId)
self.cache.remove(id, pairs.peerId)
trace "Deleted record from store", key

if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
@@ -242,57 +246,60 @@ proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =

return success()

proc remove*(self: ProvidersManager, peerId: PeerId): Future[?!void] {.async.} =
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err
proc remove*(
self: ProvidersManager,
peerId: PeerId,
entries = false): Future[?!void] {.async.} =

let
q = Query.init(cidKey)

block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
if entries:
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err

defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())
let
q = Query.init(cidKey)

var
keys: seq[Key]
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err

for item in iter:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())

keys.add(key)
var
keys: seq[Key]

let
parts = key.id.split(datastore.Separator)
for item in iter:
if (maybeKey, _) =? (await item) and key =? maybeKey:
keys.add(key)

self.cache.remove(NodeId.fromHex(parts[2]), peerId)
let
parts = key.id.split(datastore.Separator)

if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err

trace "Deleted records from store"
trace "Deleted records from store"

without provKey =? makeProviderKey(peerId), err:
without provKey =? peerId.makeProviderKey, err:
return failure err

trace "Removing provider from cache", peerId
self.cache.remove(peerId)

trace "Removing provider record", key = provKey
return (await self.store.delete(provKey))

proc remove*(
self: ProvidersManager,
cid: NodeId,
id: NodeId,
peerId: PeerId): Future[?!void] {.async.} =

self.cache.remove(cid, peerId)
without cidKey =? makeCidKey(cid, peerId), err:
self.cache.remove(id, peerId)
without cidKey =? makeCidKey(id, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg


@@ -1,21 +1,26 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.push raises: [].}

import
std/[algorithm, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos, stew/shims/net as stewNet,
std/[algorithm, net, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos,
"."/[node, random2, spr]

export options

declarePublicGauge routing_table_nodes,
declarePublicGauge dht_routing_table_nodes,
"Discovery routing table nodes", labels = ["state"]
declarePublicGauge dht_routing_table_buckets,
"Discovery routing table: number of buckets"

logScope:
topics = "discv5 routingtable"

type
DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
@@ -29,7 +34,7 @@ type

IpLimits* = object
limit*: uint
ips: Table[ValidIpAddress, uint]
ips: Table[IpAddress, uint]


RoutingTable* = object
@@ -96,7 +101,7 @@ type
ReplacementExisting
NoAddress

func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val < ipLimits.limit:
ipLimits.ips[ip] = val + 1
@@ -104,7 +109,7 @@ func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
else:
false

func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) =
func dec*(ipLimits: var IpLimits, ip: IpAddress) =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val == 1:
ipLimits.ips.del(ip)
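# --- Usage sketch (not part of the changeset): IpLimits counts table entries
# per IP and rejects additions beyond the configured limit; dec releases a
# slot. With limit = 2 and some address `ip`:
#   var limits = IpLimits(limit: 2)
#   doAssert limits.inc(ip)        # first node on this IP fits
#   doAssert limits.inc(ip)        # second one fits as well
#   doAssert not limits.inc(ip)    # third is rejected
#   limits.dec(ip)                 # frees one slot again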
@@ -177,6 +182,8 @@ proc midpoint(k: KBucket): NodeId =

proc len(k: KBucket): int = k.nodes.len

proc replacementLen*(k: KBucket): int = k.replacementCache.len

proc tail(k: KBucket): Node = k.nodes[high(k.nodes)]

proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool =
@@ -205,14 +212,14 @@ proc ipLimitDec(r: var RoutingTable, b: KBucket, n: Node) =

proc add(k: KBucket, n: Node) =
k.nodes.add(n)
routing_table_nodes.inc()
dht_routing_table_nodes.inc()

proc remove(k: KBucket, n: Node): bool =
let i = k.nodes.find(n)
if i != -1:
routing_table_nodes.dec()
if k.nodes[i].seen:
routing_table_nodes.dec(labelValues = ["seen"])
dht_routing_table_nodes.dec()
if alreadySeen(k.nodes[i]):
dht_routing_table_nodes.dec(labelValues = ["seen"])
k.nodes.delete(i)
trace "removed node:", node = n
true
@@ -278,11 +285,15 @@ proc computeSharedPrefixBits(nodes: openArray[NodeId]): int =
# Reaching this would mean that all node ids are equal.
doAssert(false, "Unable to calculate number of shared prefix bits")

proc getDepth*(b: KBucket): int =
computeSharedPrefixBits(@[b.istart, b.iend])
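# --- Worked example (not part of the changeset): a bucket's depth is the
# number of leading bits shared across its whole id range. The initial
# full-range bucket [0, 2^256 - 1] shares no prefix bits, so getDepth
# returns 0; every split produces two halves whose depth grows by one bit.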

proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop,
ipLimits = DefaultTableIpLimits, rng: ref HmacDrbgContext,
distanceCalculator = XorDistanceCalculator): T =
## Initialize the routing table for provided `Node` and bitsPerHop value.
## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper.
dht_routing_table_buckets.inc()
RoutingTable(
localNode: localNode,
buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)],
@@ -296,6 +307,7 @@ proc splitBucket(r: var RoutingTable, index: int) =
let (a, b) = bucket.split()
r.buckets[index] = a
r.buckets.insert(b, index + 1)
dht_routing_table_buckets.inc()

proc bucketForNode(r: RoutingTable, id: NodeId): KBucket =
result = binaryGetBucketForNode(r.buckets, id)
@@ -317,15 +329,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
# gets moved to the tail.
if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
if not ipLimitInc(r, k, n):
trace "replace: ip limit reached"
return IpLimitReached
ipLimitDec(r, k, k.replacementCache[nodeIdx])
k.replacementCache.delete(nodeIdx)
k.replacementCache.add(n)
trace "replace: already existed"
return ReplacementExisting
elif not ipLimitInc(r, k, n):
trace "replace: ip limit reached (2)"
return IpLimitReached
else:
doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@@ -336,7 +345,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
k.replacementCache.delete(0)

k.replacementCache.add(n)
trace "replace: added"
debug "Node added to replacement cache", n
return ReplacementAdded

proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
@@ -403,42 +412,50 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
return IpLimitReached

bucket.add(n)
else:
# Bucket must be full, but let's see if the bucket should be split.
debug "Node added to routing table", n
return Added

# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding
else:
# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)
# Bucket must be full, but let's see if the bucket should be split.
# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding

# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)

proc removeNode*(r: var RoutingTable, n: Node) =
## Remove the node `n` from the routing table.
## No replacement is added, even if there is one in the replacement cache.
let b = r.bucketForNode(n.id)
if b.remove(n):
ipLimitDec(r, b, n)

proc replaceNode*(r: var RoutingTable, n: Node) =
proc replaceNode*(r: var RoutingTable, n: Node, forceRemoveBelow = 1.0) =
## Replace node `n` with last entry in the replacement cache. If there are
## no entries in the replacement cache, node `n` will simply be removed.
# TODO: Kademlia paper recommends here to not remove nodes if there are no
# replacements. However, that would require a bit more complexity in the
# revalidation as you don't want to try pinging that node all the time.
## no entries in the replacement cache, node `n` will either be removed
## or kept based on `forceRemoveBelow`. Default: remove.
## Note: the Kademlia paper recommends not removing nodes if there are no
## replacements. This might mean pinging nodes that are not reachable, but
## it also avoids being too aggressive in the face of UDP losses or
## temporary network failures.
let b = r.bucketForNode(n.id)
if b.remove(n):
ipLimitDec(r, b, n)
if (b.replacementCache.len > 0 or n.seen <= forceRemoveBelow):
if b.remove(n):
debug "Node removed from routing table", n
ipLimitDec(r, b, n)

if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
b.add(b.replacementCache[high(b.replacementCache)])
b.replacementCache.delete(high(b.replacementCache))
if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
let rn = b.replacementCache[high(b.replacementCache)]
b.add(rn)
b.replacementCache.delete(high(b.replacementCache))
debug "Node added to routing table from replacement cache", node = rn

proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
## Get the `Node` with `id` as `NodeId` from the routing table.
@@ -459,16 +476,16 @@ proc nodesByDistanceTo(r: RoutingTable, k: KBucket, id: NodeId): seq[Node] =
sortedByIt(k.nodes, r.distance(it.id, id))

proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
seenOnly = false): seq[Node] =
seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours of the given node id.
## When seenOnly is set to true, only nodes that have been contacted
## previously successfully will be selected.
## When seenThreshold is set, only nodes that have been contacted
## previously successfully and were seen enough recently will be selected.
result = newSeqOfCap[Node](k * 2)
block addNodes:
for bucket in r.bucketsByDistanceTo(id):
for n in r.nodesByDistanceTo(bucket, id):
# Only provide actively seen nodes when `seenOnly` set.
if not seenOnly or n.seen:
# Avoid nodes with 'seen' value below threshold
if n.seen >= seenThreshold:
result.add(n)
if result.len == k * 2:
break addNodes
@@ -480,22 +497,22 @@ proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
result.setLen(k)
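# --- Usage sketch (not part of the changeset): the boolean seenOnly flag is
# replaced by a continuous threshold on the node's `seen` reliability score.
#   let any   = r.neighbours(id)                    # 0.0: no filtering
#   let solid = r.neighbours(id, BUCKET_SIZE, 0.8)  # only well-seen nodes
# Constants such as LookupSeenThreshold and QuerySeenThreshold, used earlier
# in this changeset, are plugged in here.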

proc neighboursAtDistance*(r: RoutingTable, distance: uint16,
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distance.
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenOnly)
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distance.
keepIf(result, proc(n: Node): bool = r.logDistance(n.id, r.localNode.id) == distance)

proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16],
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distances.
# TODO: This will currently return nodes with neighbouring distances, with the
# first one prioritized. It might end up not including all the node distances
# requested. Need to rework the logic here and not use the neighbours call.
if distances.len > 0:
result = r.neighbours(r.idAtDistance(r.localNode.id, distances[0]), k,
seenOnly)
seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distances.
keepIf(result, proc(n: Node): bool =
@@ -507,23 +524,30 @@ proc len*(r: RoutingTable): int =
proc moveRight[T](arr: var openArray[T], a, b: int) =
## In `arr` move elements in range [a, b] right by 1.
var t: T
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
when declared(shallowCopy):
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
else:
t = move arr[b + 1]
for i in countdown(b, a):
arr[i + 1] = move arr[i]
arr[a] = move t
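# --- Worked example (not part of the changeset): moveRight rotates arr[b + 1]
# into position a, shifting the range [a, b] right by one. With
# arr = @[10, 20, 30, 40]:
#   arr.moveRight(0, 2)   # arr becomes @[40, 10, 20, 30]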

proc setJustSeen*(r: RoutingTable, n: Node) =
## Move `n` to the head (most recently seen) of its bucket.
proc setJustSeen*(r: RoutingTable, n: Node, seen = true) =
## If seen, move `n` to the head (most recently seen) of its bucket.
## If `n` is not in the routing table, do nothing.
let b = r.bucketForNode(n.id)
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)
if seen:
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)

if not n.seen:
b.nodes[0].seen = true
routing_table_nodes.inc(labelValues = ["seen"])
if not alreadySeen(n): # first time seeing the node
dht_routing_table_nodes.inc(labelValues = ["seen"])
n.registerSeen(seen)

proc nodeToRevalidate*(r: RoutingTable): Node =
## Return a node to revalidate. The least recently seen node from a random
@@ -537,7 +561,7 @@ proc nodeToRevalidate*(r: RoutingTable): Node =
return b.nodes[^1]

proc randomNodes*(r: RoutingTable, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect.} = nil): seq[Node] =
pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].} = nil): seq[Node] =
## Get a `maxAmount` of random nodes from the routing table with the `pred`
## predicate function applied as filter on the nodes selected.
var maxAmount = maxAmount
@@ -560,7 +584,8 @@ proc randomNodes*(r: RoutingTable, maxAmount: int,
# while it will take less total time compared to e.g. an (async)
# randomLookup, the time might be wasted as all nodes are possibly seen
# already.
while len(seen) < maxAmount:
# We check against the number of nodes to avoid an infinite loop in case of a filter.
while len(result) < maxAmount and len(seen) < sz:
let bucket = r.rng[].sample(r.buckets)
if bucket.nodes.len != 0:
let node = r.rng[].sample(bucket.nodes)

@@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -9,11 +9,18 @@
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
##

{.push raises: [Defect].}
## A session stores encryption and decryption keys for P2P encryption.
## Since key exchange can be started both ways, and these might not get finalised with
## UDP transport, we can't be sure what encryption key will be used by the other side:
## - the one derived in the key-exchange started by us,
## - the one derived in the key-exchange started by the other node.
## To alleviate this issue, we store two decryption keys in each session.

{.push raises: [].}

import
std/options,
stint, stew/endians2, stew/shims/net,
std/[net, options],
stint, stew/endians2,
node, lru

export lru
@@ -27,12 +34,12 @@ const
type
AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte]
SessionValue* = array[sizeof(AesKey) + sizeof(AesKey), byte]
SessionValue* = array[3 * sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue]

func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0
result[pos ..< pos+sizeof(id)] = toBytes(id)
result[pos ..< pos+sizeof(id)] = toBytesBE(id)
pos.inc(sizeof(id))
case address.ip.family
of IpAddressFamily.IpV4:
@@ -40,20 +47,39 @@ func makeKey(id: NodeId, address: Address): SessionKey =
of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytes(address.port.uint16)
result[pos ..< pos+sizeof(address.port)] = toBytesBE(address.port.uint16)

func swapr*(s: var Sessions, id: NodeId, address: Address) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(AesKey))
copyMem(addr value[16], unsafeAddr val[0], sizeof(AesKey))
copyMem(addr value[32], unsafeAddr val[32], sizeof(AesKey))
s.put(key, value)

func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[sizeof(r) + sizeof(w), byte]
value[0 .. 15] = r
value[16 .. ^1] = w
s.put(makeKey(id, address), value)
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(r))
value[16 .. 31] = r
value[32 .. ^1] = w
s.put(key, value)

func load*(s: var Sessions, id: NodeId, address: Address, r, w: var AesKey): bool =
func load*(s: var Sessions, id: NodeId, address: Address, r1, r2, w: var AesKey): bool =
let res = s.get(makeKey(id, address))
if res.isSome():
let val = res.get()
copyMem(addr r[0], unsafeAddr val[0], sizeof(r))
copyMem(addr w[0], unsafeAddr val[sizeof(r)], sizeof(w))
copyMem(addr r1[0], unsafeAddr val[0], sizeof(r1))
copyMem(addr r2[0], unsafeAddr val[sizeof(r1)], sizeof(r2))
copyMem(addr w[0], unsafeAddr val[sizeof(r1) + sizeof(r2)], sizeof(w))
return true
else:
return false
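# --- Usage sketch (not part of the changeset): with two read keys per
# session, decryption can try both candidates, since either side's key
# exchange may be the one the peer actually completed. Variable names are
# illustrative.
#   var r1, r2, w: AesKey
#   if sessions.load(id, address, r1, r2, w):
#     # try decrypting with r1 first, fall back to r2
#     discard
#   sessions.store(id, address, readKey, writeKey)  # rotates the read keys
#   sessions.swapr(id, address)                     # swap r1/r2 priority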

@@ -6,10 +6,10 @@
#
import
chronicles,
std/[options, strutils, sugar],
pkg/stew/[results, byteutils, arrayops],
results,
std/[net, options, strutils, sugar],
pkg/stew/[byteutils, arrayops],
stew/endians2,
stew/shims/net,
stew/base64,
libp2p/crypto/crypto,
libp2p/crypto/secp,
@@ -58,7 +58,7 @@ proc incSeqNo*(
proc update*(
r: var SignedPeerRecord,
pk: crypto.PrivateKey,
ip: Option[ValidIpAddress],
ip: Option[IpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()):
RecordResult[void] =
## Update a `SignedPeerRecord` with given ip address, tcp port, udp port and optional
@@ -97,9 +97,8 @@ proc update*(
if udpPort.isNone and tcpPort.isNone:
return err "No existing address in SignedPeerRecord with no port provided"

let ipAddr = try: ValidIpAddress.init(ip.get)
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
let ipAddr = ip.get

if tcpPort.isSome:
transProto = IpTransportProtocol.tcpProtocol
transProtoPort = tcpPort.get
@@ -123,9 +122,13 @@ proc update*(
.mapErr((e: string) => e.cstring)
existingIp =
if existingNetProtoFam == MultiCodec.codec("ip6"):
ipv6 array[16, byte].initCopyFrom(existingNetProtoAddr)
IpAddress(
family: IPv6, address_v6: array[16, byte].initCopyFrom(existingNetProtoAddr)
)
else:
ipv4 array[4, byte].initCopyFrom(existingNetProtoAddr)
IpAddress(
family: IPv4, address_v4: array[4, byte].initCopyFrom(existingNetProtoAddr)
)

ipAddr = ip.get(existingIp)

@@ -223,7 +226,7 @@ proc init*(
T: type SignedPeerRecord,
seqNum: uint64,
pk: PrivateKey,
ip: Option[ValidIpAddress],
ip: Option[IpAddress],
tcpPort, udpPort: Option[Port]):
RecordResult[T] =
## Initialize a `SignedPeerRecord` with given sequence number, private key, optional
@@ -238,9 +241,7 @@ proc init*(
tcpPort, udpPort

var
ipAddr = try: ValidIpAddress.init("127.0.0.1")
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
ipAddr = static parseIpAddress("127.0.0.1")
proto: IpTransportProtocol
protoPort: Port


@@ -6,26 +6,40 @@

# Everything below the handling of ordinary messages
import
std/[tables, options],
std/[net, tables, options, sets],
bearssl/rand,
chronos,
chronicles,
metrics,
libp2p/crypto/crypto,
stew/shims/net,
"."/[node, encoding, sessions]

const
handshakeTimeout* = 2.seconds ## timeout for the reply on the
handshakeTimeout* = 500.milliseconds ## timeout for the reply on the
## whoareyou message
responseTimeout* = 4.seconds ## timeout for the response of a request-response
responseTimeout* = 1.seconds ## timeout for the response of a request-response
## call

logScope:
topics = "discv5 transport"

declarePublicCounter dht_transport_tx_packets,
"Discovery transport packets sent", labels = ["state"]
declarePublicCounter dht_transport_tx_bytes,
"Discovery transport bytes sent", labels = ["state"]
declarePublicCounter dht_transport_rx_packets,
"Discovery transport packets received", labels = ["state"]
declarePublicCounter dht_transport_rx_bytes,
"Discovery transport bytes received", labels = ["state"]

type
Transport*[Client] = ref object
client: Client
bindAddress: Address ## UDP binding address
transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, PendingRequest]
pendingRequests: Table[AESGCMNonce, (PendingRequest, Moment)]
keyexchangeInProgress: HashSet[NodeId]
pendingRequestsByNode: Table[NodeId, seq[seq[byte]]]
codec*: Codec
rng: ref HmacDrbgContext

@@ -33,29 +47,36 @@ type
node: Node
message: seq[byte]

proc sendToA(t: Transport, a: Address, data: seq[byte]) =
proc sendToA(t: Transport, a: Address, msg: seq[byte]) =
trace "Send packet", myport = t.bindAddress.port, address = a
let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, data)
f.callback = proc(data: pointer) {.gcsafe.} =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
let f = t.transp.sendTo(ta, msg)
f.addCallback(
proc(data: pointer) =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
dht_transport_tx_packets.inc(labelValues = ["failed"])
dht_transport_tx_bytes.inc(msg.len.int64, labelValues = ["failed"])
)
dht_transport_tx_packets.inc()
dht_transport_tx_bytes.inc(msg.len.int64)

proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome())
t.sendToA(n.address.get(), data)

proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
let (data, _, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message)
t.sendToA(toAddr, data)

@@ -65,7 +86,7 @@ proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte
proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, request):
if not t.pendingRequests.hasKeyOrPut(nonce, (request, Moment.now())):
sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce)

@@ -73,11 +94,30 @@ proc registerRequest(t: Transport, n: Node, message: seq[byte],
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let (data, nonce) = encodeMessagePacket(t.rng[], t.codec,
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message)

t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
if haskey:
trace "Send message: has key", myport = t.bindAddress.port, dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
else:
# we don't have an encryption key for this target, so we should initiate keyexchange
if not (toNode.id in t.keyexchangeInProgress):
trace "Send message: send random to trigger Whoareyou", myport = t.bindAddress.port, dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.keyexchangeInProgress.incl(toNode.id)
trace "keyexchangeInProgress added", myport = t.bindAddress.port, dstId = toNode
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (timeout)", myport = t.bindAddress.port, dstId = toNode
else:
# delay sending this message until whoareyou is received and handshake is sent
# have to re-encode once keys are clear
t.pendingRequestsByNode.mgetOrPut(toNode.id, newSeq[seq[byte]]()).add(message)
trace "Send message: Node with this id already has ongoing keyexchange, delaying packet",
myport = t.bindAddress.port, dstId = toNode, qlen = t.pendingRequestsByNode[toNode.id].len
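# --- Flow sketch (not part of the changeset): an outgoing message now takes
# one of three paths in the proc above.
#   1. session key present: encrypt and send directly;
#   2. no key and no exchange running: send the random-looking packet to
#      trigger a Whoareyou, mark keyexchangeInProgress, and arm a
#      handshakeTimeout fallback that clears the flag;
#   3. no key but an exchange already running: park the plaintext message in
#      pendingRequestsByNode, to be re-encoded and flushed by sendPending once
#      the handshake completes.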
|
||||
|
||||
 proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
     requestNonce: AESGCMNonce, node: Option[Node]) =
@@ -92,16 +132,33 @@ proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
     let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
       recordSeq, pubkey)
     sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
-      # TODO: should we still provide cancellation in case handshake completes
-      # correctly?
-      t.codec.handshakes.del(key)
+      # the handshake key is popped in decodeHandshakePacket; if it has not
+      # been popped by the time the timeout fires:
+      if t.codec.hasHandshake(key):
+        debug "Handshake timeout", myport = t.bindAddress.port, dstId = toId, address = a
+        t.codec.handshakes.del(key)

     trace "Send whoareyou", dstId = toId, address = a
     t.sendToA(a, data)
   else:
-    debug "Node with this id already has ongoing handshake, ignoring packet"
+    # TODO: is it reasonable to drop the packet here? Should we allow a mini-queue?
+    # The queue should be on the sender side, as this packet is random-encoded!
+    debug "Node with this id already has ongoing handshake, queuing packet", myport = t.bindAddress.port, dstId = toId, address = a

+proc sendPending(t: Transport, toNode: Node):
+    Future[void] {.async.} =
+  if t.pendingRequestsByNode.hasKey(toNode.id):
+    trace "Found pending request", myport = t.bindAddress.port, src = toNode, len = t.pendingRequestsByNode[toNode.id].len
+    for message in t.pendingRequestsByNode[toNode.id]:
+      trace "Sending pending packet", myport = t.bindAddress.port, dstId = toNode.id
+      let address = toNode.address.get()
+      let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec, toNode.id, address, message)
+      t.registerRequest(toNode, message, nonce)
+      t.send(toNode, data)
+    t.pendingRequestsByNode.del(toNode.id)

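Note that sendPending re-encodes every queued payload, since the queued packets were originally encoded before any session keys existed; the call sites further below invoke it with discard, i.e. fire-and-forget. A small illustration of that chronos idiom (drainQueue is a stand-in name):

import chronos

proc drainQueue() {.async.} =
  # stand-in for sendPending: re-encode and transmit everything queued
  await sleepAsync(10.milliseconds)

proc onHandshakeComplete() =
  # `discard drainQueue()` drops the Future: failures inside it are silently
  # lost. chronos' asyncSpawn is the stricter alternative, turning an
  # unexpected failure into a Defect instead of swallowing it.
  discard drainQueue()
  # asyncSpawn drainQueue()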
 proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
+  dht_transport_rx_packets.inc()
+  dht_transport_rx_bytes.inc(packet.len.int64)
   let decoded = t.codec.decodePacket(a, packet)
   if decoded.isOk:
     let packet = decoded[]
@@ -109,20 +166,33 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
     of OrdinaryMessage:
       if packet.messageOpt.isSome():
         let message = packet.messageOpt.get()
-        trace "Received message packet", srcId = packet.srcId, address = a,
+        trace "Received message packet", myport = t.bindAddress.port, srcId = packet.srcId, address = a,
           kind = message.kind, p = $packet
         t.client.handleMessage(packet.srcId, a, message)
       else:
-        trace "Not decryptable message packet received",
+        trace "Not decryptable message packet received", myport = t.bindAddress.port,
           srcId = packet.srcId, address = a
+        # If we already have a keyexchange in progress, we have a case of simultaneous cross-connect.
+        # We could try to decide here which should go on, but since we are on top of UDP, a more robust
+        # choice is to answer here and resolve conflicts in the next stage (reception of Whoareyou), or
+        # even later (reception of Handshake).
+        if packet.srcId in t.keyexchangeInProgress:
+          trace "cross-connect detected, still sending Whoareyou"
         t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
           t.client.getNode(packet.srcId))

     of Flag.Whoareyou:
-      trace "Received whoareyou packet", address = a
-      var pr: PendingRequest
-      if t.pendingRequests.take(packet.whoareyou.requestNonce, pr):
-        let toNode = pr.node
+      trace "Received whoareyou packet", myport = t.bindAddress.port, address = a
+      var
+        prt: (PendingRequest, Moment)
+      if t.pendingRequests.take(packet.whoareyou.requestNonce, prt):
+        let
+          pr = prt[0]
+          startTime = prt[1]
+          toNode = pr.node
+          rtt = Moment.now() - startTime
+        # trace "whoareyou RTT:", rtt, node = toNode
+        toNode.registerRtt(rtt)
         # This is a node we previously contacted and thus must have an address.
         doAssert(toNode.address.isSome())
         let address = toNode.address.get()
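registerRtt itself is not part of this diff; one plausible implementation keeps an exponentially weighted moving average in the style of TCP's smoothed RTT. The 1/8 gain and the field names below are assumptions, not taken from the codebase:

import chronos   # Duration, nanoseconds

type NodeStats = object
  srtt: Duration   # smoothed round-trip time
  samples: int

proc registerRtt(s: var NodeStats, rtt: Duration) =
  # EWMA with gain 1/8, the same constant RFC 6298 uses for TCP's SRTT:
  #   srtt <- 7/8 * srtt + 1/8 * sample
  if s.samples == 0:
    s.srtt = rtt   # the first sample seeds the estimate
  else:
    s.srtt = nanoseconds((s.srtt.nanoseconds * 7 + rtt.nanoseconds) div 8)
  inc s.samples

var stats = NodeStats()
stats.registerRtt(nanoseconds(1_000_000))   # a 1 ms sample seeds srtt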
@@ -136,12 +206,17 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
           toNode.pubkey
         ).expect("Valid handshake packet to encode")

-        trace "Send handshake message packet", dstId = toNode.id, address
+        trace "Send handshake message packet", myport = t.bindAddress.port, dstId = toNode.id, address
         t.send(toNode, data)
+        # keyexchange ready, we can send queued packets
+        t.keyexchangeInProgress.excl(toNode.id)
+        trace "keyexchangeInProgress removed (finished)", myport = t.bindAddress.port, dstId = toNode.id, address
+        discard t.sendPending(toNode)

       else:
         debug "Timed out or unrequested whoareyou packet", address = a
     of HandshakeMessage:
-      trace "Received handshake message packet", srcId = packet.srcIdHs,
+      trace "Received handshake message packet", myport = t.bindAddress.port, srcId = packet.srcIdHs,
         address = a, kind = packet.message.kind
       t.client.handleMessage(packet.srcIdHs, a, packet.message)
       # For a handshake message it is possible that we received a newer SPR.
@@ -155,28 +230,35 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
       if node.address.isSome() and a == node.address.get():
         # TODO: maybe here we could verify that the address matches what we were
         # sending the 'whoareyou' message to. In that case, we can set 'seen'
-        node.seen = true
+        # TODO: verify how this works with restrictive NAT and firewall scenarios.
+        node.registerSeen()
         if t.client.addNode(node):
-          trace "Added new node to routing table after handshake", node
+          trace "Added new node to routing table after handshake", node, tablesize = t.client.nodesDiscovered()
+        discard t.sendPending(node)
+      else:
+        trace "address mismatch, not adding seen flag", node, address = a, nodeAddress = node.address.get()
   else:
-    trace "Packet decoding error", error = decoded.error, address = a
+    dht_transport_rx_packets.inc(labelValues = ["failed_decode"])
+    dht_transport_rx_bytes.inc(packet.len.int64, labelValues = ["failed_decode"])
+    trace "Packet decoding error", myport = t.bindAddress.port, error = decoded.error, address = a

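The dht_transport_rx_* counters are incremented both plainly and with labelValues = ["failed_decode"], which suggests declarations along these lines. The help strings and the label name are guesses, and the sketch assumes nim-metrics accepts unlabelled updates on a labelled collector, as the usage above implies:

import metrics

declareCounter dht_transport_rx_packets,
  "Packets received by the DHT transport", labels = ["state"]
declareCounter dht_transport_rx_bytes,
  "Bytes received by the DHT transport", labels = ["state"]

dht_transport_rx_packets.inc()                                    # decoded fine
dht_transport_rx_packets.inc(labelValues = ["failed_decode"])     # decode error
dht_transport_rx_bytes.inc(1280, labelValues = ["failed_decode"])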
 proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
     Future[void] {.async.} =
   let t = getUserData[Transport[T]](transp)

   # TODO: should we use `peekMessage()` to avoid allocation?
-  let buf = try: transp.getMessage()
-            except TransportOsError as e:
-              # This is likely to be local network connection issues.
-              warn "Transport getMessage", exception = e.name, msg = e.msg
-              return
+  let buf = try:
+      transp.getMessage()
+    except TransportOsError as e:
+      # This is likely to be local network connection issues.
+      warn "Transport getMessage", exception = e.name, msg = e.msg
+      return

   let ip = try: raddr.address()
            except ValueError as e:
              error "Not a valid IpAddress", exception = e.name, msg = e.msg
              return
-  let a = Address(ip: ValidIpAddress.init(ip), port: raddr.port)
+  let a = Address(ip: ip, port: raddr.port)

   t.receive(a, buf)

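The reshaped getMessage call uses Nim's try-as-an-expression form, where a branch ending in return (a noreturn statement) needs no value of its own. A standalone illustration of the same shape (parsePort is a made-up helper):

import std/strutils

proc parsePort(s: string): int =
  # `try` is an expression here; the except branch exits the whole proc,
  # so only the success branch has to produce the value for `p`.
  let p = try:
      parseInt(s)
    except ValueError:
      return -1
  p

doAssert parsePort("8080") == 8080
doAssert parsePort("not a port") == -1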
@@ -209,7 +291,7 @@ proc newTransport*[T](

   Transport[T](
     client: client,
-    bindAddress: Address(ip: ValidIpAddress.init(bindIp), port: bindPort),
+    bindAddress: Address(ip: bindIp, port: bindPort),
     codec: Codec(
       localNode: localNode,
       privKey: privKey,

@@ -1,10 +1,6 @@

 switch("define", "libp2p_pki_schemes=secp256k1")

+include "build.nims"

-# begin Nimble config (version 2)
---noNimblePath
-when withDir(thisDir(), system.fileExists("nimble.paths")):
-  include "nimble.paths"
-# end Nimble config

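The removed stanza is the block that `nimble setup` appends to config.nims: --noNimblePath disables Nimble's default package search path, and the generated nimble.paths file, when present, pins each dependency to an explicit location. For illustration only, such a file is simply a list of --path switches (the entries below are invented):

# nimble.paths -- generated by `nimble setup`, one entry per locked package
--path:"~/.nimble/pkgs2/chronos-3.2.0-0277b65be2c7a365ac13df002fba6e172be55537"
--path:"~/.nimble/pkgs2/stew-0.1.0-7afe7e3c070758cac1f628e4330109f3ef6fc853"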
nimble.lock (deleted, 310 lines)
@@ -1,310 +0,0 @@
-{
-  "packages": {
-    "nim": {
-      "version": "1.6.14",
-      "vcsRevision": "71ba2e7f3c5815d956b1ae0341b0743242b8fec6",
-      "url": "https://github.com/nim-lang/Nim.git",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "f9ce6fa986a4e75514fe26d4c773789b8897eb18"
-      }
-    },
-    "zlib": {
-      "version": "0.1.0",
-      "vcsRevision": "f34ca261efd90f118dc1647beefd2f7a69b05d93",
-      "url": "https://github.com/status-im/nim-zlib",
-      "downloadMethod": "git",
-      "dependencies": [
-        "stew"
-      ],
-      "checksums": {
-        "sha1": "bffa9edcea9c879d827ec64a44d342dafd04ce7a"
-      }
-    },
-    "stew": {
-      "version": "0.1.0",
-      "vcsRevision": "7afe7e3c070758cac1f628e4330109f3ef6fc853",
-      "url": "https://github.com/status-im/nim-stew",
-      "downloadMethod": "git",
-      "dependencies": [
-        "results"
-      ],
-      "checksums": {
-        "sha1": "00ced0b61233f6c57d50bbda170fee644ccc2934"
-      }
-    },
-    "httputils": {
-      "version": "0.3.0",
-      "vcsRevision": "3b491a40c60aad9e8d3407443f46f62511e63b18",
-      "url": "https://github.com/status-im/nim-http-utils",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "1331f33585eda05d1e50385fa7871c3bf2a449d7"
-      }
-    },
-    "chronos": {
-      "version": "3.2.0",
-      "vcsRevision": "0277b65be2c7a365ac13df002fba6e172be55537",
-      "url": "https://github.com/status-im/nim-chronos",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "78a41db7fb05b937196d4fa2f1e3fb4353b36a07"
-      }
-    },
-    "upraises": {
-      "version": "0.1.0",
-      "vcsRevision": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2",
-      "url": "https://github.com/markspanbroek/upraises",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "a0243c8039e12d547dbb2e9c73789c16bb8bc956"
-      }
-    },
-    "sqlite3_abi": {
-      "version": "3.40.1.1",
-      "vcsRevision": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3",
-      "url": "https://github.com/arnetheduck/nim-sqlite3-abi",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "8e91db8156a82383d9c48f53b33e48f4e93077b1"
-      }
-    },
-    "questionable": {
-      "version": "0.10.10",
-      "vcsRevision": "b3cf35ac450fd42c9ea83dc084f5cba2efc55da3",
-      "url": "https://github.com/codex-storage/questionable",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "8bb23a05d7f21619010471aa009e27d3fa73d93a"
-      }
-    },
-    "results": {
-      "version": "0.4.0",
-      "vcsRevision": "f3c666a272c69d70cb41e7245e7f6844797303ad",
-      "url": "https://github.com/arnetheduck/nim-results",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "51e08ca9524db98dc909fb39192272cc2b5451c7"
-      }
-    },
-    "unittest2": {
-      "version": "0.0.7",
-      "vcsRevision": "b178f47527074964f76c395ad0dfc81cf118f379",
-      "url": "https://github.com/status-im/nim-unittest2",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "b6d4a5cbe28b43c166d6442ba6804aafd4abe368"
-      }
-    },
-    "websock": {
-      "version": "0.1.0",
-      "vcsRevision": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8",
-      "url": "https://github.com/status-im/nim-websock",
-      "downloadMethod": "git",
-      "dependencies": [
-        "httputils",
-        "stew",
-        "chronos",
-        "chronicles",
-        "zlib"
-      ],
-      "checksums": {
-        "sha1": "6ea200c4a34315a3c8fd3e63db991546144fbd2c"
-      }
-    },
-    "secp256k1": {
-      "version": "0.6.0.3.1",
-      "vcsRevision": "2acbbdcc0e63002a013fff49f015708522875832",
-      "url": "https://github.com/status-im/nim-secp256k1",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "146818431dec16ededb951f42fc36832949bcc8f"
-      }
-    },
-    "bearssl": {
-      "version": "0.2.0",
-      "vcsRevision": "99fcb3405c55b27cfffbf60f5368c55da7346f23",
-      "url": "https://github.com/status-im/nim-bearssl",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "919dc7c8b6f5e774932b211574eb9c5886f29cc2"
-      }
-    },
-    "dnsclient": {
-      "version": "0.3.4",
-      "vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
-      "url": "https://github.com/ba0f3/dnsclient.nim",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
-      }
-    },
-    "nimcrypto": {
-      "version": "0.5.4",
-      "vcsRevision": "24e006df85927f64916e60511620583b11403178",
-      "url": "https://github.com/status-im/nimcrypto",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "a4db2105de265930f1578bb7957f49fa39b10d9b"
-      }
-    },
-    "json_serialization": {
-      "version": "0.1.5",
-      "vcsRevision": "bb53d49caf2a6c6cf1df365ba84af93cdcfa7aa3",
-      "url": "https://github.com/status-im/nim-json-serialization",
-      "downloadMethod": "git",
-      "dependencies": [
-        "serialization"
-      ],
-      "checksums": {
-        "sha1": "a3369eafda12e4aa57215019b4e27ac536c909ee"
-      }
-    },
-    "testutils": {
-      "version": "0.5.0",
-      "vcsRevision": "b56a5953e37fc5117bd6ea6dfa18418c5e112815",
-      "url": "https://github.com/status-im/nim-testutils",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "5969947aac865bacf9487b5fb5c33e4a59463f0f"
-      }
-    },
-    "npeg": {
-      "version": "1.2.1",
-      "vcsRevision": "b15a10e388b91b898c581dbbcb6a718d46b27d2f",
-      "url": "https://github.com/zevv/npeg",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "6a413c727f23ea913421753a13bc394b148f74a7"
-      }
-    },
-    "serialization": {
-      "version": "0.2.0",
-      "vcsRevision": "384eb2561ee755446cff512a8e057325848b86a7",
-      "url": "https://github.com/status-im/nim-serialization",
-      "downloadMethod": "git",
-      "dependencies": [
-        "faststreams"
-      ],
-      "checksums": {
-        "sha1": "feef734e39589686712b052286ed5c947cbaa380"
-      }
-    },
-    "faststreams": {
-      "version": "0.3.0",
-      "vcsRevision": "720fc5e5c8e428d9d0af618e1e27c44b42350309",
-      "url": "https://github.com/status-im/nim-faststreams",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "ab178ba25970b95d953434b5d86b4d60396ccb64"
-      }
-    },
-    "datastore": {
-      "version": "0.0.1",
-      "vcsRevision": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa",
-      "url": "https://github.com/codex-storage/nim-datastore",
-      "downloadMethod": "git",
-      "dependencies": [
-        "questionable",
-        "asynctest",
-        "upraises",
-        "sqlite3_abi"
-      ],
-      "checksums": {
-        "sha1": "2c03bb47de97962d2a64be1ed0a8161cd9d65159"
-      }
-    },
-    "asynctest": {
-      "version": "0.4.1",
-      "vcsRevision": "fe1a34caf572b05f8bdba3b650f1871af9fce31e",
-      "url": "https://github.com/codex-storage/asynctest",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "1203c9fce32d96e8ed71190aea1b314cfba915ba"
-      }
-    },
-    "stint": {
-      "version": "0.0.1",
-      "vcsRevision": "86621eced1dcfb5e25903019ebcfc76ed9128ec5",
-      "url": "https://github.com/status-im/nim-stint",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "6b4a6fdd317202b7c092dc32b74a6573f81cca62"
-      }
-    },
-    "metrics": {
-      "version": "0.0.1",
-      "vcsRevision": "6142e433fc8ea9b73379770a788017ac528d46ff",
-      "url": "https://github.com/status-im/nim-metrics",
-      "downloadMethod": "git",
-      "dependencies": [],
-      "checksums": {
-        "sha1": "16ba266012d32d49631ca00add8e4698343758e0"
-      }
-    },
-    "libp2p": {
-      "version": "1.1.0",
-      "vcsRevision": "440461b24b9e66542b34d26a0b908c17f6549d05",
-      "url": "https://github.com/status-im/nim-libp2p",
-      "downloadMethod": "git",
-      "dependencies": [
-        "metrics",
-        "unittest2",
-        "nimcrypto",
-        "bearssl",
-        "websock",
-        "dnsclient",
-        "secp256k1",
-        "chronicles"
-      ],
-      "checksums": {
-        "sha1": "f68a38d5704eae32f196254b1e46d853273752bb"
-      }
-    },
-    "chronicles": {
-      "version": "0.10.3",
-      "vcsRevision": "c9c8e58ec3f89b655a046c485f622f9021c68b61",
-      "url": "https://github.com/status-im/nim-chronicles",
-      "downloadMethod": "git",
-      "dependencies": [
-        "testutils",
-        "json_serialization"
-      ],
-      "checksums": {
-        "sha1": "03bc6acab0af1f362fa3323399804e16de1d98f1"
-      }
-    },
-    "protobuf_serialization": {
-      "version": "0.3.0",
-      "vcsRevision": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6",
-      "url": "https://github.com/status-im/nim-protobuf-serialization",
-      "downloadMethod": "git",
-      "dependencies": [
-        "faststreams",
-        "serialization",
-        "npeg"
-      ],
-      "checksums": {
-        "sha1": "86e064ec560c1241453d2cd8f82b36b61a8a90e0"
-      }
-    }
-  },
-  "version": 1
-}
@@ -1,16 +1,32 @@
 import std / [os, strutils, sequtils]

 task testAll, "Run DHT tests":
-  exec "nim c -r tests/testAll.nim"
+  exec "nim c -r test.nim"
+  rmFile "./test"

+task compileParallelTests, "Compile parallel tests":
+  exec "nim c --hints:off --verbosity:0 dht/test_providers.nim"
+  exec "nim c --hints:off --verbosity:0 dht/test_providermngr.nim"
+  exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5.nim"
+  exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim"
+
 task test, "Run DHT tests":
-  exec "nim c -r -d:testsAll --verbosity:0 tests/testAllParallel.nim"
+  # compile with trace logging to make sure it doesn't crash
+  exec "nim c -d:testsAll -d:chronicles_enabled=on -d:chronicles_log_level=TRACE test.nim"
+  rmFile "./test"
+  compileParallelTestsTask()
+  exec "nim c -r -d:testsAll --verbosity:0 testAllParallel.nim"
+  rmFile "./testAllParallel"

 task testPart1, "Run DHT tests A":
-  exec "nim c -r -d:testsPart1 tests/testAllParallel.nim"
+  compileParallelTestsTask()
+  exec "nim c -r -d:testsPart1 testAllParallel.nim"
+  rmFile "./testAllParallel"

 task testPart2, "Run DHT tests B":
-  exec "nim c -r -d:testsPart2 tests/testAllParallel.nim"
+  compileParallelTestsTask()
+  exec "nim c -r -d:testsPart2 testAllParallel.nim"
+  rmFile "./testAllParallel"

 task coverage, "generates code coverage report":
   var (output, exitCode) = gorgeEx("which lcov")
@@ -43,7 +59,7 @@ task coverage, "generates code coverage report":
     if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()

   echo "======== Running Tests ======== "
-  exec("nim c -r tests/coverage.nim")
+  exec("nim c -r coverage.nim")
   exec("rm nimcache/*.c")
   rmDir("coverage"); mkDir("coverage")
   echo " ======== Running LCOV ======== "

@@ -1,2 +0,0 @@
-
-include ./testAll

@@ -1,15 +0,0 @@
-switch("define", "testsAll")
-
-switch("debugger", "native")
-switch("lineDir", "on")
-switch("define", "debug")
-# switch("opt", "none")
-switch("verbosity", "0")
-switch("hints", "off")
-switch("warnings", "off")
-switch("define", "chronicles_log_level=INFO")
-switch("nimcache", "nimcache")
-switch("passC", "-fprofile-arcs")
-switch("passC", "-ftest-coverage")
-switch("passL", "-fprofile-arcs")
-switch("passL", "-ftest-coverage")

@@ -1,17 +1,14 @@
 import
   std/net,
   bearssl/rand,
   chronos,
   libp2p/crypto/[crypto, secp],
   libp2p/multiaddress,
   codexdht/discv5/[node, routing_table, spr],
   codexdht/discv5/crypto as dhtcrypto,
-  codexdht/discv5/protocol as discv5_protocol,
-  stew/shims/net
-
-export net
+  codexdht/discv5/protocol as discv5_protocol

 proc localAddress*(port: int): Address =
-  Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
+  Address(ip: IPv4_loopback(), port: Port(port))

 proc example*(T: type PrivateKey, rng: ref HmacDrbgContext): PrivateKey =
   PrivateKey.random(PKScheme.Secp256k1, rng[]).expect("Valid rng for private key")
@@ -54,7 +51,7 @@ proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool =
     if id == n.id: return true

 proc generateNode*(privKey: PrivateKey, port: int,
-    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
+    ip: IpAddress = parseIpAddress("127.0.0.1")): Node =

   let
     port = Port(port)
@@ -72,7 +69,7 @@ proc generateNRandomNodes*(rng: ref HmacDrbgContext, n: int): seq[Node] =
   res

 proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
-    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): (Node, PrivateKey) =
+    ip: IpAddress = parseIpAddress("127.0.0.1")): (Node, PrivateKey) =
   while true:
     let
       privKey = PrivateKey.random(rng).expect("Valid rng for private key")
@@ -81,37 +78,37 @@ proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
     return (node, privKey)

 proc nodeAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
-    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
+    ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
   let (node, _) = n.nodeAndPrivKeyAtDistance(rng, d, ip)
   node

 proc nodesAtDistance*(
     n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
-    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
+    ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
   for i in 0..<amount:
     result.add(nodeAtDistance(n, rng, d, ip))

 proc nodesAtDistanceUniqueIp*(
     n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
-    ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
+    ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
   var ta = initTAddress(ip, Port(0))
   for i in 0..<amount:
     ta.inc()
-    result.add(nodeAtDistance(n, rng, d, ValidIpAddress.init(ta.address())))
+    result.add(nodeAtDistance(n, rng, d, ta.address()))

 proc addSeenNode*(d: discv5_protocol.Protocol, n: Node): bool =
   # Add it as a seen node, warning: for testing convenience only!
-  n.seen = true
+  n.registerSeen()
   d.addNode(n)

 func udpExample*(_: type MultiAddress): MultiAddress =
-  ## creates a new udp multiaddress on a random port
-  Multiaddress.init("/ip4/0.0.0.0/udp/0")
+  ## creates a new udp MultiAddress on a random port
+  MultiAddress.init("/ip4/0.0.0.0/udp/0")

 func udpExamples*(_: type MultiAddress, count: int): seq[MultiAddress] =
   var res: seq[MultiAddress] = @[]
   for i in 1..count:
-    res.add Multiaddress.init("/ip4/0.0.0.0/udp/" & $i).get
+    res.add MultiAddress.init("/ip4/0.0.0.0/udp/" & $i).get
   return res

 proc toSignedPeerRecord*(privKey: PrivateKey): SignedPeerRecord =

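A recurring change across these test diffs is the retirement of stew/shims/net's ValidIpAddress in favour of the standard library's IpAddress. The before/after in isolation, using only std/net:

import std/net

# before, with stew/shims/net:
#   let ip = ValidIpAddress.init("127.0.0.1")
# after, standard library only:
let ip = parseIpAddress("127.0.0.1")   # raises ValueError on malformed input
doAssert ip.family == IpAddressFamily.IPv4
doAssert $ip == "127.0.0.1"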
@@ -2,10 +2,10 @@
 import std/sequtils

 import pkg/chronos
-import pkg/asynctest
+import pkg/asynctest/chronos/unittest
 import pkg/datastore
 from pkg/libp2p import PeerId

 import codexdht/dht
 import codexdht/private/eth/p2p/discoveryv5/spr
 import codexdht/private/eth/p2p/discoveryv5/providers
 import codexdht/discv5/node
@@ -100,10 +100,10 @@ suite "Test Providers Manager multiple":
       not (await manager.contains(nodeIds[49]))
       not (await manager.contains(nodeIds[99]))

-  test "Should remove by PeerId":
-    (await (manager.remove(providers[0].data.peerId))).tryGet
-    (await (manager.remove(providers[5].data.peerId))).tryGet
-    (await (manager.remove(providers[9].data.peerId))).tryGet
+  test "Should remove by PeerId with associated keys":
+    (await (manager.remove(providers[0].data.peerId, true))).tryGet
+    (await (manager.remove(providers[5].data.peerId, true))).tryGet
+    (await (manager.remove(providers[9].data.peerId, true))).tryGet

     for id in nodeIds:
       check:
@@ -116,6 +116,22 @@ suite "Test Providers Manager multiple":
         not (await manager.contains(providers[5].data.peerId))
         not (await manager.contains(providers[9].data.peerId))

+  test "Should not return keys without provider":
+    for id in nodeIds:
+      check:
+        (await manager.get(id)).tryGet.len == 10
+
+    for provider in providers:
+      (await (manager.remove(provider.data.peerId))).tryGet
+
+    for id in nodeIds:
+      check:
+        (await manager.get(id)).tryGet.len == 0
+
+    for provider in providers:
+      check:
+        not (await manager.contains(provider.data.peerId))
+
 suite "Test providers with cache":
   let
     rng = newRng()
@@ -164,9 +180,9 @@ suite "Test providers with cache":
       not (await manager.contains(nodeIds[99]))

   test "Should remove by PeerId":
-    (await (manager.remove(providers[0].data.peerId))).tryGet
-    (await (manager.remove(providers[5].data.peerId))).tryGet
-    (await (manager.remove(providers[9].data.peerId))).tryGet
+    (await (manager.remove(providers[0].data.peerId, true))).tryGet
+    (await (manager.remove(providers[5].data.peerId, true))).tryGet
+    (await (manager.remove(providers[9].data.peerId, true))).tryGet

     for id in nodeIds:
      check:
@@ -218,6 +234,24 @@ suite "Test Provider Maintenance":
     for id in nodeIds:
       check: (await manager.get(id)).tryGet.len == 0

+  test "Should not cleanup unexpired":
+    let
+      unexpired = PrivateKey.example(rng).toSignedPeerRecord()
+
+    (await manager.add(nodeIds[0], unexpired, ttl = 1.minutes)).tryGet
+
+    await sleepAsync(500.millis)
+    await manager.store.cleanupExpired()
+
+    let
+      unexpiredProvs = (await manager.get(nodeIds[0])).tryGet
+
+    check:
+      unexpiredProvs.len == 1
+      await (unexpired.data.peerId in manager)
+
+    (await manager.remove(nodeIds[0])).tryGet
+
   test "Should cleanup orphaned":
     for id in nodeIds:
       check: (await manager.get(id)).tryGet.len == 0

@@ -10,18 +10,15 @@
 {.used.}

 import
-  std/[options, sequtils],
-  asynctest,
+  std/[options],
+  asynctest/chronos/unittest2,
   bearssl/rand,
   chronicles,
   chronos,
   nimcrypto,
   libp2p/crypto/[crypto, secp],
   libp2p/[multiaddress, multicodec, multihash, routing_record, signed_envelope],
   codexdht/dht,
   codexdht/discv5/crypto as dhtcrypto,
   codexdht/discv5/protocol as discv5_protocol,
   stew/byteutils,
   test_helper

 proc bootstrapNodes(
@@ -59,7 +56,7 @@ proc bootstrapNetwork(
   #waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above

   var res = await bootstrapNodes(nodecount - 1,
-    @[bootnode.localNode.record],
+    @[bootNode.localNode.record],
     rng,
     delay)
   res.insert((bootNode, bootNodeKey), 0)

@@ -2,7 +2,7 @@

 import
   std/tables,
-  chronos, chronicles, stint, asynctest, stew/shims/net,
+  chronos, chronicles, stint, asynctest/chronos/unittest,
   stew/byteutils, bearssl/rand,
   libp2p/crypto/crypto,
   codexdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
@@ -287,7 +287,7 @@ suite "Discovery v5 Tests":
     await mainNode.closeWait()
     await testNode.closeWait()

-  proc testLookupTargets(fast: bool = false) {.async.} =
+  proc testLookupTargets(fast: bool = false): Future[bool] {.async.} =
     const
       nodeCount = 17

@@ -306,9 +306,9 @@ suite "Discovery v5 Tests":
      for t in nodes:
        if n != t:
          let pong = await n.ping(t.localNode)
-         check pong.isOk()
          if pong.isErr():
            echo pong.error
+           return false
          # check (await n.ping(t.localNode)).isOk()

    for i in 1 ..< nodeCount:
@@ -318,16 +318,19 @@ suite "Discovery v5 Tests":
      let target = nodes[i]
      let discovered = await nodes[nodeCount-1].lookup(target.localNode.id, fast = fast)
      debug "Lookup result", target = target.localNode, discovered
-     check discovered[0] == target.localNode
+     if discovered[0] != target.localNode:
+       return false

    for node in nodes:
      await node.closeWait()

+   return true
+
  test "Lookup targets":
-    await testLookupTargets()
+    check await testLookupTargets()

  test "Lookup targets using traditional findNode":
-    await testLookupTargets(fast = true)
+    check await testLookupTargets(fast = true)

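Having testLookupTargets return Future[bool] and asserting once at the top level works around a unittest pitfall: a failed check inside a nested async helper does not abort the enclosing test the way a top-level check does. The pattern, reduced to a sketch (scenario is a made-up name):

import chronos
import unittest2

proc scenario(): Future[bool] {.async.} =
  ## Report failure to the caller instead of check-ing deep inside the helper.
  await sleepAsync(1.milliseconds)
  return 1 + 1 == 2

suite "bool-returning async helpers":
  test "assert once, at the top level":
    check waitFor scenario()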
test "Resolve target":
|
||||
let
|
||||
@ -412,31 +415,37 @@ suite "Discovery v5 Tests":
|
||||
await mainNode.closeWait()
|
||||
await lookupNode.closeWait()
|
||||
|
||||
# We no longer support field filtering
|
||||
# test "Random nodes with spr field filter":
|
||||
# let
|
||||
# lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
|
||||
# targetNode = generateNode(PrivateKey.example(rng))
|
||||
# otherNode = generateNode(PrivateKey.example(rng))
|
||||
# anotherNode = generateNode(PrivateKey.example(rng))
|
||||
test "Random nodes, also with filter":
|
||||
let
|
||||
lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
|
||||
targetNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20302))
|
||||
otherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20303))
|
||||
anotherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20304))
|
||||
|
||||
# check:
|
||||
# lookupNode.addNode(targetNode)
|
||||
# lookupNode.addNode(otherNode)
|
||||
# lookupNode.addNode(anotherNode)
|
||||
check:
|
||||
lookupNode.addNode(targetNode.localNode.record)
|
||||
lookupNode.addNode(otherNode.localNode.record)
|
||||
lookupNode.addNode(anotherNode.localNode.record)
|
||||
|
||||
# let discovered = lookupNode.randomNodes(10)
|
||||
# check discovered.len == 3
|
||||
# let discoveredFiltered = lookupNode.randomNodes(10,
|
||||
# ("test", @[byte 1,2,3,4]))
|
||||
# check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode)
|
||||
let discovered = lookupNode.randomNodes(10)
|
||||
check discovered.len == 3
|
||||
let discoveredFiltered = lookupNode.randomNodes(10,
|
||||
proc(n: Node) : bool = n.address.get.port == Port(20302))
|
||||
check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode.localNode)
|
||||
let discoveredEmpty = lookupNode.randomNodes(10,
|
||||
proc(n: Node) : bool = n.address.get.port == Port(20305))
|
||||
check discoveredEmpty.len == 0
|
||||
|
||||
await lookupNode.closeWait()
|
||||
await targetNode.closeWait()
|
||||
await otherNode.closeWait()
|
||||
await anotherNode.closeWait()
|
||||
|
||||
# await lookupNode.closeWait()
|
||||
|
||||
test "New protocol with spr":
|
||||
let
|
||||
privKey = PrivateKey.example(rng)
|
||||
ip = some(ValidIpAddress.init("127.0.0.1"))
|
||||
ip = some(parseIpAddress("127.0.0.1"))
|
||||
port = Port(20301)
|
||||
node = newProtocol(privKey, ip, some(port), some(port), bindPort = port,
|
||||
rng = rng)
|
||||
@@ -531,7 +540,7 @@ suite "Discovery v5 Tests":
    let
      port = Port(9000)
      fromNoderecord = SignedPeerRecord.init(1, PrivateKey.example(rng),
-       some(ValidIpAddress.init("11.12.13.14")),
+       some(parseIpAddress("11.12.13.14")),
        some(port), some(port))[]
      fromNode = newNode(fromNoderecord)[]
      privKey = PrivateKey.example(rng)
@@ -543,7 +552,7 @@ suite "Discovery v5 Tests":
    block: # Duplicates
      let
        record = SignedPeerRecord.init(
-         1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+         1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]

      # Exact duplicates
@@ -553,7 +562,7 @@ suite "Discovery v5 Tests":

      # Node id duplicates
      let recordSameId = SignedPeerRecord.init(
-       1, privKey, some(ValidIpAddress.init("212.13.14.15")),
+       1, privKey, some(parseIpAddress("212.13.14.15")),
        some(port), some(port))[]
      records.add(recordSameId)
      nodes = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -562,7 +571,7 @@ suite "Discovery v5 Tests":
    block: # No address
      let
        recordNoAddress = SignedPeerRecord.init(
-         1, privKey, none(ValidIpAddress), some(port), some(port))[]
+         1, privKey, none(IpAddress), some(port), some(port))[]
        records = [recordNoAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
      check test.len == 0
@@ -570,7 +579,7 @@ suite "Discovery v5 Tests":
    block: # Invalid address - site local
      let
        recordInvalidAddress = SignedPeerRecord.init(
-         1, privKey, some(ValidIpAddress.init("10.1.2.3")),
+         1, privKey, some(parseIpAddress("10.1.2.3")),
          some(port), some(port))[]
        records = [recordInvalidAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -579,7 +588,7 @@ suite "Discovery v5 Tests":
    block: # Invalid address - loopback
      let
        recordInvalidAddress = SignedPeerRecord.init(
-         1, privKey, some(ValidIpAddress.init("127.0.0.1")),
+         1, privKey, some(parseIpAddress("127.0.0.1")),
          some(port), some(port))[]
        records = [recordInvalidAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -588,7 +597,7 @@ suite "Discovery v5 Tests":
    block: # Invalid distance
      let
        recordInvalidDistance = SignedPeerRecord.init(
-         1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+         1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]
        records = [recordInvalidDistance]
        test = verifyNodesRecords(records, fromNode, limit, @[0'u16])
@@ -597,7 +606,7 @@ suite "Discovery v5 Tests":
    block: # Invalid distance but distance validation is disabled
      let
        recordInvalidDistance = SignedPeerRecord.init(
-         1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+         1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]
        records = [recordInvalidDistance]
        test = verifyNodesRecords(records, fromNode, limit)
@@ -624,12 +633,12 @@ suite "Discovery v5 Tests":
    let
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly initialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")
    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))

-   let (packet, _) = encodeMessagePacket(rng[], codec,
+   let (packet, _, _) = encodeMessagePacket(rng[], codec,
      receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
    receiveNode.transport.receive(a, packet)

@@ -653,13 +662,13 @@ suite "Discovery v5 Tests":
    let
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly initialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")
    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
    for i in 0 ..< 5:
      let a = localAddress(20303 + i)
-     let (packet, _) = encodeMessagePacket(rng[], codec,
+     let (packet, _, _) = encodeMessagePacket(rng[], codec,
        receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
      receiveNode.transport.receive(a, packet)

@@ -684,14 +693,14 @@ suite "Discovery v5 Tests":
      a = localAddress(20303)
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly initialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")
    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))

    var firstRequestNonce: AESGCMNonce
    for i in 0 ..< 5:
-     let (packet, requestNonce) = encodeMessagePacket(rng[], codec,
+     let (packet, requestNonce, _) = encodeMessagePacket(rng[], codec,
        receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
      receiveNode.transport.receive(a, packet)
      if i == 0:

@@ -2,14 +2,13 @@

 import
   std/[options, sequtils, tables],
-  asynctest/unittest2,
+  asynctest/chronos/unittest2,
   bearssl/rand,
   chronos,
   libp2p/crypto/secp,
   codexdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
   codexdht/discv5/crypto,
   stew/byteutils,
-  stew/shims/net,
   stint,
   ../dht/test_helper

@@ -275,11 +274,11 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":

    let
      enrRecA = SignedPeerRecord.init(1, privKeyA,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly initialized private key")

      enrRecB = SignedPeerRecord.init(1, privKeyB,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly initialized private key")

      nodeA = newNode(enrRecA).expect("Properly initialized record")
@@ -508,11 +507,11 @@ suite "Discovery v5.1 Additional Encode/Decode":

    let
      enrRecA = SignedPeerRecord.init(1, privKeyA,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly initialized private key")

      enrRecB = SignedPeerRecord.init(1, privKeyB,
-       some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+       some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly initialized private key")

      nodeA = newNode(enrRecA).expect("Properly initialized record")
@@ -526,7 +525,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
      reqId = RequestId.init(rng[])
      message = encodeMessage(m, reqId)

-    let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
+    let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
      nodeB.address.get(), message)

    let decoded = codecB.decodePacket(nodeA.address.get(), data)
@@ -642,7 +641,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
    codecB.sessions.store(nodeA.id, nodeA.address.get(), secrets.initiatorKey,
      secrets.recipientKey)

-    let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
+    let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
      nodeB.address.get(), message)

    let decoded = codecB.decodePacket(nodeA.address.get(), data)

tests/test.nimble (new file, 13 lines)
@@ -0,0 +1,13 @@
+# Package
+
+version = "0.4.0"
+author = "Status Research & Development GmbH"
+description = "Tests for Logos Storage DHT"
+license = "MIT"
+installFiles = @["build.nims"]
+
+# Dependencies
+requires "asynctest >= 0.5.2 & < 0.6.0"
+requires "unittest2 <= 0.0.9"
+
+include "build.nims"

@@ -8,13 +8,13 @@ var cmds: seq[string]

 when defined(testsPart1) or defined(testsAll):
   cmds.add [
-    "nim c -r --hints:off --verbosity:0 tests/dht/test_providers.nim",
-    "nim c -r --hints:off --verbosity:0 tests/dht/test_providermngr.nim",
+    "nim c -r --hints:off --verbosity:0 dht/test_providers.nim",
+    "nim c -r --hints:off --verbosity:0 dht/test_providermngr.nim",
   ]
 when defined(testsPart2) or defined(testsAll):
   cmds.add [
-    "nim c -r --hints:off --verbosity:0 tests/discv5/test_discoveryv5.nim",
-    "nim c -r --hints:off --verbosity:0 tests/discv5/test_discoveryv5_encoding.nim",
+    "nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5.nim",
+    "nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim",
   ]

 echo "Running Test Commands: ", cmds

vendor/atlas.workspace (deleted, 3 lines)
@@ -1,3 +0,0 @@
-deps=""
-resolver="MaxVer"
-overrides="urls.rules"