Compare commits

...

87 Commits

Author SHA1 Message Date
Chrysostomos Nanakos
754765ba31
chore: orc support (#110)
Support ORC memory model (https://github.com/logos-storage/logos-storage-nim-dht/issues/109)

Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
2025-12-23 23:23:31 +02:00
Arnaud
99884b5971
Rename Codex to Logos Storage (#108) 2025-12-15 13:46:04 +01:00
Jacek Sieka
6c7de03622
chore: bump stew et al (#107)
* fix use of deprecated imports
* bump stew
* `results` is its own package
* drop protobuf_serialization
* force leveldb version
2025-12-11 13:47:10 +01:00
Eric
f6eef1ac95
Merge pull request #104 from codex-storage/update-to-nim-2-x
Update to nim 2 x
2025-02-14 12:01:54 +11:00
Arnaud
fb17db8187
Update dependencies versions 2025-02-13 20:54:49 +01:00
Eric
d435c6945f
Merge pull request #105 from codex-storage/fix/deps/2.0-deps
fix(deps): remove deps pinned to commit hash
2025-02-13 15:25:50 +11:00
Eric
89d22c156e
bump nimcrypto 2025-02-13 15:07:51 +11:00
Eric
ee33946afb
bump nimcrypto 2025-02-13 12:19:43 +11:00
Eric
c777106e7f
set non-major range for remaining deps 2025-02-13 12:15:22 +11:00
Eric
14d4dd97e9
toBytes -> toBytesBE 2025-02-13 12:15:00 +11:00
Eric
a9e17f4a33
remove nim 1.6 from ci 2025-02-13 12:09:50 +11:00
Eric
bc27eebb85
fix pinned deps
Leaving nim-datastore as a commit hash until it has a relevant release tag
2025-02-13 12:08:09 +11:00
Ben
0f67d21bbc
updates nim-datastore 2025-02-10 11:25:40 +01:00
Arnaud
4bd3a39e00
Update to Nim 2.0.14 2025-01-07 10:51:55 +01:00
Arnaud
5f22be0420
Remove useless comment 2024-12-18 10:52:06 +01:00
Arnaud
4eb4e9126a
Use IpAddress instead of ValidAddress; remove unused import 2024-12-18 10:50:02 +01:00
Arnaud
5320e8c81e
Remove .lock and file and direct dependency to nim-results 2024-12-10 10:05:59 +01:00
Arnaud
cc54a4f0ec
Set dependencies versions and commit hashes and introduce nimble.lock 2024-12-09 18:57:41 +01:00
Arnaud
e7e45de75f
Nim 2 config auto generated 2024-12-09 18:57:03 +01:00
Arnaud
a3f203bbea
Add nimbledeps to gitignore 2024-12-09 18:56:18 +01:00
Arnaud
de39c2006e
Add Nim version 2.0.12 to CI matrix 2024-12-09 12:56:14 +01:00
Arnaud
cafb6ffe53
Update version 2024-12-09 12:55:24 +01:00
Arnaud
570fb9a936
Update dependencies 2024-12-09 12:48:03 +01:00
Arnaud
9fdf0eca8a
Add Nim 2.x specific configuration 2024-12-09 12:47:35 +01:00
Arnaud
d73dc48515
Add pragma for exception raises 2024-12-09 12:47:08 +01:00
Csaba Kiraly
57f4b6f7cb
Merge pull request #103 from codex-storage/fix-randomNodes
fix potential infinite loop in randomNodes
2024-10-18 20:45:49 +02:00
Csaba Kiraly
ee4e2102d9
Merge pull request #99 from codex-storage/fix-removal
add link reliability metrics, fix aggressive node removal on first packet loss
2024-10-18 20:14:23 +02:00
Csaba Kiraly
a6cfe1a084
fix potential infinite loop in randomNodes
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-17 12:38:54 +02:00
Csaba Kiraly
1a344f1fd7
log reliability based on loss statistics
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-15 18:17:49 +02:00
Csaba Kiraly
fee5a9ced2
set NoreplyRemoveThreshold to 0.5
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:35:15 +02:00
Csaba Kiraly
6310c50ce0
introduce NoreplyRemoveThreshold
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/protocol.nim
2024-10-14 15:35:10 +02:00
Csaba Kiraly
7507e99c96
register "not seen" when missing replies
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:33:34 +02:00
Csaba Kiraly
02bc12e639
change node seen flag to an exponential moving average
keep defaults as before

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/node.nim
#	codexdht/private/eth/p2p/discoveryv5/routing_table.nim
2024-10-14 15:33:29 +02:00
Csaba Kiraly
e1c1089e4f
fix aggressive node removal on first packet loss
UDP packets get lost easily. We can't just remove
nodes from the routing table at first loss, as it can
create issues in small networks and in cases of temporary
connection failures.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:28:09 +02:00
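
Taken together, the commits above replace the boolean `seen` flag with an exponential moving average and gate eviction on `NoreplyRemoveThreshold` (0.5). A minimal sketch of the idea — constant names mirror the node.nim diff further down, while `TrackedNode` and the `unreliable` helper are illustrative, not the repo's API:

```nim
const
  seenSmoothingFactor = 0.9
  NoreplyRemoveThreshold = 0.5

type TrackedNode = ref object
  seen: float  # 0 = never contacted; otherwise an EMA of recent reachability

proc registerSeen(n: TrackedNode, seen = true) =
  if n.seen == 0:
    n.seen = 1  # first successful contact pins the score to 1
  else:
    n.seen = seenSmoothingFactor * n.seen +
      (1.0 - seenSmoothingFactor) * seen.float

proc unreliable(n: TrackedNode): bool =
  # A single lost UDP packet only nudges the score (0.9 * 1.0 = 0.9);
  # removal becomes reasonable only after repeated misses drag the
  # average below the threshold.
  n.seen > 0 and n.seen < NoreplyRemoveThreshold
```
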
Csaba Kiraly
c1d2ea410d
Merge pull request #102 from codex-storage/measure-rtt-bw
Measure rtt, estimate bw, and log every 5 minutes
2024-10-14 14:19:35 +02:00
Csaba Kiraly
8b1660464d
don't log bandwidth estimates
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 13:57:52 +02:00
Csaba Kiraly
7057663f81
fixup: remove excessive debug
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 11:19:36 +02:00
Csaba Kiraly
ff5391a35e
Merge pull request #100 from codex-storage/metrics
rename and add more dht metrics
2024-10-10 12:51:09 +02:00
Csaba Kiraly
4ccaaee721
rename metrics to dht_ from discovery_
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:44:26 +02:00
Csaba Kiraly
80cc069c5e
metrics: add transport byte counters
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:43:23 +02:00
Csaba Kiraly
ffeeeeb3fb
transport: add metrics
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:42:11 +02:00
Csaba Kiraly
4d2250477e
metrics: add discovery_routing_table_buckets
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-10 11:40:45 +02:00
Csaba Kiraly
b7b04ed9e4
metrics: rename routing_table_nodes to discovery_routing_table_nodes
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-10 11:40:44 +02:00
Csaba Kiraly
6e180af4aa
Merge pull request #101 from codex-storage/logging
Logging updates
2024-10-10 11:22:23 +02:00
Csaba Kiraly
706cb50041
add debugPrintLoop to print neighborhood info
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:31:06 +02:00
Csaba Kiraly
0825d887ea
add bandwidth estimate
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:31:06 +02:00
Csaba Kiraly
ec4f0d4a84
add transport level RTT measurement
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:18:02 +02:00
Csaba Kiraly
0b69de242f
add rtt measurement
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:17:58 +02:00
Csaba Kiraly
f3eec2a202
node: add RTT and bandwidth measurement holders
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:17:29 +02:00
Csaba Kiraly
f6971cc947
logging: better logging of SPR update
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:15:50 +02:00
Csaba Kiraly
4d9e39d86c
transport: improve logging
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-08 11:15:20 +02:00
Csaba Kiraly
b8bcb2d08d
Merge pull request #95 from codex-storage/factorize
Factorize code
2024-10-07 14:06:59 +02:00
Csaba Kiraly
f121d080e7
Merge pull request #96 from codex-storage/reduce-timeouts
Reduce timeouts
2024-10-03 10:54:44 +02:00
Csaba Kiraly
fef297c622
Merge pull request #94 from codex-storage/feature-FindNodeFastResultLimit
Add separate limit for results returned in FindNodeFast
2024-10-01 15:04:26 +02:00
Csaba Kiraly
936a5ec6fa
Merge pull request #93 from codex-storage/fix-FindNodeResultLimit
fix returning too many nodes when FindNodeResultLimit!=BUCKET_SIZE
2024-10-01 14:51:33 +02:00
Ben Bierens
9acdca795b
routing table logging update (#97)
* Clear logs for adding and removing of nodes. routingtable log topic for filtering.

* Makes node ID shortening consistent with other short-id formats

* redundant else block

* fixes dependencies
2024-09-23 15:49:08 +02:00
Ben Bierens
5f38fd9570
GCC-14 (#98)
* bumps bearssl

* updates version of bearssl in lockfiles

* fixes that checksum

* attempt to bump various dependencies

* updates asynctest version tag

* asynctest sha

* bumps to working version of nim-datastore

* adjusts asynctest imports for chronos

* chronos checksum

* checksum for datastore

* libp2p version tag

* libp2p checksum

* moves libp2p from codex-branch to latest master

* libp2p checksum

* splits the test dependencies from the dev dependencies (example nim-ethers)

* sets path

* pathing in tests

* oops wrong version

* adds build.nims to installfiles for test module

* attempt to fix import paths

* bumps nim-datastore

* datastore checksum

* greatly simplify CI

* fixes asynctest import

* builds parallel tests before running

* bumps datastore

* turns nim-stable back off

* pins nim-datastore version

* bumps checkout to v4

* Review comment by Mark

Co-authored-by: markspanbroek <mark@spanbroek.net>

* Review comment by Mark

Co-authored-by: markspanbroek <mark@spanbroek.net>

---------

Co-authored-by: markspanbroek <mark@spanbroek.net>
2024-08-20 11:04:48 +02:00
Csaba Kiraly
5624700855
reduce default timeouts
We really don't need these to be 2 and 4 seconds.
Later we should tune it better based on measurements
or estimates. We should also check the relation between
these three values.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:34:10 +02:00
Csaba Kiraly
76da855725
use handshakeTimeout if handshake starting in sendMessage
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:20:20 +02:00
Csaba Kiraly
4c9c92232b
remove unused sendRequest call
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:17 +02:00
Csaba Kiraly
148b10908d
trace log: do not log binary encoding
Even at trace level this feels too much.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:13 +02:00
Csaba Kiraly
f299c23e2e
remove lookupWorkerFast duplicate code
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:03 +02:00
Csaba Kiraly
bdf57381e3
introduce FindNodeFastResultLimit
We do not need that many responses with FindNodeFast, since the
responses can be ordered by distance.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:06:43 +02:00
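
A hedged sketch of the truncation this commit describes: rank candidates by XOR distance to the target and keep only the closest few. `NodeId` is really a `UInt256` in this codebase; `uint64` stands in to keep the example self-contained:

```nim
import std/algorithm

const FindNodeFastResultLimit = 6  # the limit this commit introduces

proc closestTo(target: uint64, candidates: seq[uint64]): seq[uint64] =
  # Kademlia-style XOR metric: smaller xor value means closer node.
  result = candidates.sortedByIt(it xor target)
  if result.len > FindNodeFastResultLimit:
    result.setLen(FindNodeFastResultLimit)  # the requester can ask again if needed
```
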
Csaba Kiraly
4b82bdc2f9
fix returning too many nodes when FindNodeResultLimit!=BUCKET_SIZE
Code assumed these two values to be the same, resulting in
reception errors.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 03:55:03 +02:00
Csaba Kiraly
d8160ff0f7
add logging helper for Protocol
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:39:13 +02:00
Csaba Kiraly
f766cb39b1
encoding: introducing type cipher=aes128
Introducing the cipher type to ease changing cipher.
No functional change

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:37:26 +02:00
Csaba Kiraly
316464fc71
dht: waitMessage: expose timeout as parameter, keeping default
defaults to ResponseTimeout as before

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:35:29 +02:00
Csaba Kiraly
6e61e02091
fixup: move sendRequest forward
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:34:49 +02:00
Csaba Kiraly
dfff39091b
introduce waitResponse wrapper
Initialize the wait for a response before sending the request.
This is needed in cases where the response arrives before
execution moves to the next instruction, such as in a directly
connected test.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:33:56 +02:00
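
A toy chronos-style model of the race this commit closes: if the future is created only after `send`, a reply that arrives synchronously has nowhere to land. All names here are placeholders for illustration, not the repo's API:

```nim
import chronos

type
  Response = object
  PendingTable = object
    fut: Future[Response]

proc expectResponse(p: var PendingTable): Future[Response] =
  # register interest in the reply *first*
  p.fut = newFuture[Response]("expectResponse")
  p.fut

proc send(p: var PendingTable, msg: seq[byte]) =
  # in a directly connected test the "reply" can arrive synchronously,
  # before the caller ever reaches an `await`
  p.fut.complete(Response())

proc waitResponse(p: var PendingTable, msg: seq[byte]): Future[Response] =
  result = p.expectResponse()  # the waiter exists before the request leaves
  p.send(msg)
```
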
Giuliano Mega
63822e8356
Update nim-codex-dht to Chronos V4 (#90)
Update nim-codex-dht to Chronos v4
2024-05-23 17:49:44 -03:00
Giuliano Mega
2299317116
Merge pull request #91 from codex-storage/chore/update-libp2p-repo
Update repo for libp2p
2024-03-18 19:40:01 -03:00
gmega
717cd0a50c
Merge branch 'master' into chore/update-libp2p-repo 2024-03-18 19:11:57 -03:00
Giuliano Mega
223ce9240b
Merge pull request #92 from codex-storage/fix/nimble-archive-osx
fix name for nimble archive for OSX
2024-03-18 19:11:28 -03:00
gmega
709a873862
fix name for nimble archive for OSX 2024-03-18 18:48:04 -03:00
gmega
b3d01245e9
update repo for libp2p 2024-03-18 18:42:48 -03:00
Dmitriy Ryajov
beefafcc6f
Update CleanupInterval to 24 hours (#88) 2023-11-21 17:14:15 -08:00
Dmitriy Ryajov
a7f14bc9b7
Fix logging format (#87)
* add shortLog for Address

* compile with trace logging to catch errors
2023-11-20 09:34:40 -08:00
Dmitriy Ryajov
dd4985435a
Fix timeout and delete (#86)
* use unix time for ttl

* don't remove all entries on peer removal

* cleanup questionable tuple destructure

* ignore vscode

* fix endians decoding

* allow removing by peerId

* invalidate cache by peerId on remove

* update test
2023-11-17 14:01:16 -08:00
Csaba Kiraly
91b2eaec89
Fix: arrive to working keys in case of simultaneous cross connect (#84)
* improve tracing of message exchange

run e.g. as
```
nim c -r -d:debug -d:chronicles_enabled=on -d:chronicles_log_level=TRACE -d:chronicles_sinks=textlines[nocolors,stdout] tests/dht/test_providers.nim >err
```

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* add debug on Handshake timeout

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* queue messages during handshake and send later

If a handshake was already in progress, messages were dropped.
Instead of this, it is better to queue these and send as soon
as the handshake is finished and thus the encryption key is known.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* rename handshakeInProgress to keyexchangeInProgress

Handshake is also the name of a message, which made the previous
name less clear.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* keyexchangeInProgress: do not remove on handshake received

This is the wrong direction, not needed

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* fix cross-connect key exchange

Since key exchange can be started both ways simultaneously, and
these might not get finalised with UDP transport, we can't be
sure what encryption key will be used by the other side:
- the one derived in the key-exchange started by us,
- the one derived in the key-exchange started by the other node.
To alleviate this issue, we store two decryption keys in each session.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

---------

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2023-11-17 11:50:28 -08:00
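
A minimal sketch of the two-key session this commit describes; it mirrors the `decodeMessagePacket` change in the encoding.nim diff below, where the real code keeps AES keys and calls `decryptGCM`. The byte-compare "cipher" here is a stand-in so the example runs on its own:

```nim
import std/options

type
  Key = array[16, byte]
  Session = object
    key1, key2: Key  # keys derived from the two simultaneous key exchanges

proc tryDecrypt(key: Key, ct: seq[byte], sentWith: Key): Option[seq[byte]] =
  # stand-in for decryptGCM: succeeds only when the keys match
  if key == sentWith: some(ct) else: none(seq[byte])

proc decrypt(s: var Session, ct: seq[byte], sentWith: Key): Option[seq[byte]] =
  result = tryDecrypt(s.key2, ct, sentWith)    # preferred key first
  if result.isNone:
    result = tryDecrypt(s.key1, ct, sentWith)  # fall back to the other key
    if result.isSome:
      swap(s.key1, s.key2)  # promote the key that worked, as sessions.swapr does
```
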
Csaba Kiraly
66116b9bf6
Fix: queue messages when there is no encryption key (#83)
* encodeMessagePacket: expose haskey

encodeMessagePacket checks for session and behaves differently
based on that. Exposing this difference in behavior.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* improve tracing of message exchange

run e.g. as
```
nim c -r -d:debug -d:chronicles_enabled=on -d:chronicles_log_level=TRACE -d:chronicles_sinks=textlines[nocolors,stdout] tests/dht/test_providers.nim >err
```

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* add debug on Handshake timeout

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* queue messages during handshake and send later

If a handshake was already in progress, messages were dropped.
Instead of this, it is better to queue these and send as soon
as the handshake is finished and thus the encryption key is known.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* rename handshakeInProgress to keyexchangeInProgress

Handshake is also the name of a message, which made the previous
name less clear.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* keyexchangeInProgress: do not remove on handshake received

This is the wrong direction, not needed

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

---------

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2023-11-17 11:18:48 -08:00
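
A hedged sketch of the queuing idea, driven by the `haskey` flag that `encodeMessagePacket` exposes in the encoding.nim diff below. The repo keys its queue differently and sends over UDP, so everything here is illustrative:

```nim
import std/tables

type MsgQueue = Table[string, seq[seq[byte]]]  # destination -> queued payloads

proc sendOrQueue(q: var MsgQueue, dest: string, msg: seq[byte], hasKey: bool) =
  if hasKey:
    discard "encrypt with the session key and send immediately"
  else:
    # key exchange still in flight: park the message instead of dropping it
    q.mgetOrPut(dest, @[]).add(msg)

proc onKeyEstablished(q: var MsgQueue, dest: string) =
  # handshake finished: flush everything that accumulated meanwhile
  for msg in q.getOrDefault(dest):
    discard "encrypt and send msg with the fresh key"
  q.del(dest)
```
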
Jaremy Creechley
60dc4e764c
Fix changes from stint (#81)
* some formatting tweaks to make errors easier to grok

* stint removed overloads for regular ints - use stew versions instead

* various name style fixes

* ignore vscode stuff

* revert style changes

* revert unneeded var rename changes

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2023-11-17 11:08:45 -08:00
Dmitriy Ryajov
ee5d8acb05
cleanup and avoid lockups (#85) 2023-09-20 09:20:26 -07:00
Slava
ed7caa119d
Update CI workflow trigger branch (#82)
* Update CI workflow trigger branch

* Update Codecov workflow trigger branch and badges
2023-08-26 09:31:31 +03:00
Jaremy Creechley
fdd02450aa
bump release (#79) 2023-07-25 19:56:25 -07:00
Jaremy Creechley
b585290397
Bump deps (#77)
Updates all Status IM and Codex Storage deps to the latest except for nim-stint which is held back due to some compiler issue. 

* fix nimble name
* don't override nimble
* update all deps
* import nimble.lock and fix urls
* don't forget nim
* bump to chronos with async notifications
2023-07-21 15:29:38 -07:00
Dmitriy Ryajov
9ae0bfb1c3
Fix nimble install (#78)
* make encryption scheme explicit

* suppress compiler noise

* make `nimble install` without `-d` work

* move `libp2p_pki_schemes=secp256k1` to config.nims

* fix include
2023-07-21 15:51:42 -06:00
Jaremy Creechley
1f27eb4aff
Upgrade secp256k1 deps (#76)
* import full secret hash from upstream
* use full secret hash including prefix byte 0x02 / 0x03
* import nimble lock
* fix atlas lock
* update stint
* cleanup urls
* bump lock files
* match lockfiles
2023-07-19 17:04:28 -07:00
39 changed files with 982 additions and 1301 deletions

@@ -1,42 +0,0 @@
-name: Install Nimble
-description: install nimble
-inputs:
-  nimble_version:
-    description: "install nimble"
-    # TODO: make sure to change to tagged release when available
-    default: "latest"
-  os:
-    description: "operating system"
-    default: "linux"
-  cpu:
-    description: "cpu architecture"
-    default: "amd64"
-runs:
-  using: "composite"
-  steps:
-    - uses: actions/checkout@v3
-    - name: Build Nimble
-      shell: bash
-      run: |
-        set -x
-        mkdir -p .nimble
-        cd .nimble
-        if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
-          CPU=x64
-        elif [[ '${{ inputs.cpu }}' == 'i386' ]]; then
-          CPU=x32
-        else
-          CPU=${{ inputs.cpu }}
-        fi
-        if [[ '${{ inputs.os }}' == 'macos' ]]; then
-          OS=apple
-        else
-          OS='${{ inputs.os }}'
-        fi
-        URL=https://github.com/nim-lang/nimble/releases/download/${{ inputs.nimble_version }}/nimble-"$OS"_"$CPU".tar.gz
-        curl -o nimble.tar.gz -L -s -S "$URL"
-        tar -xvf nimble.tar.gz
-    - name: Derive environment variables
-      shell: bash
-      run: echo '${{ github.workspace }}/.nimble/' >> $GITHUB_PATH

@@ -1,134 +1,22 @@
 name: CI
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-  workflow_dispatch:
+on: [push, pull_request]
 jobs:
-  build:
-    timeout-minutes: 90
+  test:
+    runs-on: ${{ matrix.os }}
     strategy:
-      fail-fast: false
       matrix:
-        target:
-          - os: linux
-            cpu: amd64
-            tests: all
-          - os: macos
-            cpu: amd64
-            tests: all
-          - os: windows
-            cpu: amd64
-            tests: part1
-          - os: windows
-            cpu: amd64
-            tests: part2
-        branch: [version-1-6]
-        include:
-          - target:
-              os: linux
-            builder: ubuntu-20.04
-            shell: bash
-          - target:
-              os: macos
-            builder: macos-12
-            shell: bash
-          - target:
-              os: windows
-            builder: windows-latest
-            shell: msys2 {0}
-    defaults:
-      run:
-        shell: ${{ matrix.shell }}
-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
-    runs-on: ${{ matrix.builder }}
-    continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
+        nim: [2.2.4]
+        os: [ubuntu-latest, macos-latest, windows-latest]
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: true
-      - name: MSYS2 (Windows amd64)
-        if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
-        uses: msys2/setup-msys2@v2
-        with:
-          path-type: inherit
-          install: >-
-            base-devel
-            git
-            mingw-w64-x86_64-toolchain
-      - name: Restore Nim DLLs dependencies (Windows) from cache
-        if: runner.os == 'Windows'
-        id: windows-dlls-cache
-        uses: actions/cache@v2
-        with:
-          path: external/dlls
-          key: 'dlls'
-      - name: Install DLL dependencies (Windows)
-        if: >
-          steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
-          runner.os == 'Windows'
-        run: |
-          mkdir external
-          curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
-          7z x external/windeps.zip -oexternal/dlls
-      - name: Path to cached dependencies (Windows)
-        if: >
-          runner.os == 'Windows'
-        run: |
-          echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
-      ## Restore nimble deps
-      - name: Restore nimble dependencies from cache
-        id: nimble_deps
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.nimble
-            ${{ github.workspace }}/.nimble
-          key: ${{ matrix.builder }}-${{ matrix.target.cpu }}-dotnimble-${{ hashFiles('nimble.lock') }}
-      - name: Setup Nimble
-        uses: "./.github/actions/install_nimble"
-        with:
-          os: ${{ matrix.target.os }}
-          cpu: ${{ matrix.target.cpu }}
-      - name: Setup Env
-        run: |
-          nimble -v
-      - name: Setup Deps
-        run: |
-          nimble install -d
-          nimble setup
-      - name: Run tests
-        if: runner.os != 'Windows'
-        run: |
-          nimble test -y
-      - name: Run windows tests part1
-        if: runner.os == 'Windows' && matrix.target.tests == 'part1'
-        run: |
-          if [[ "${{ matrix.target.os }}" == "windows" ]]; then
-            # https://github.com/status-im/nimbus-eth2/issues/3121
-            export NIMFLAGS="-d:nimRawSetjmp"
-          fi
-          nimble testPart1 -y
-      - name: Run windows tests part2
-        if: runner.os == 'Windows' && matrix.target.tests == 'part2'
-        run: |
-          if [[ "${{ matrix.target.os }}" == "windows" ]]; then
-            export NIMFLAGS="-d:nimRawSetjmp"
-          fi
-          nimble testPart2 -y
+        uses: actions/checkout@v4
+      - uses: jiro4989/setup-nim-action@v2
+        with:
+          nim-version: ${{matrix.nim}}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build
+        run: nimble install -y
+      - name: Test
+        run: nimble test -y

@@ -1,69 +0,0 @@
-name: Generate and upload code coverage
-on:
-  #On push to common branches, this computes the "bases stats" for PRs
-  push:
-    branches:
-      - main
-  pull_request:
-  workflow_dispatch:
-jobs:
-  All_Tests:
-    name: All tests
-    runs-on: ubuntu-20.04
-    strategy:
-      matrix:
-        nim-options: [
-          ""
-        ]
-        test-program: [
-          "test"
-        ]
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: 0
-      - name: Environment setup
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y lcov build-essential git curl
-          mkdir coverage
-      - name: Restore nimble dependencies from cache
-        id: nimble_deps
-        uses: actions/cache@v3
-        with:
-          path: |
-            ~/.nimble
-          key: ubuntu-20.04-amd64-${{ hashFiles('nimble.lock') }}
-      - name: Setup Nimble
-        uses: "./.github/actions/install_nimble"
-        with:
-          os: linux
-          cpu: x64
-      - name: Setup Env
-        run: |
-          nimble -v
-      - name: Setup Deps
-        run: |
-          nimble install -d
-          nimble setup
-      - name: Run tests
-        run: |
-          nimble -y --verbose coverage
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
-        with:
-          directory: ./coverage/
-          fail_ci_if_error: true
-          files: ./coverage/coverage.f.info
-          flags: unittests
-          name: codecov-umbrella
-          verbose: true

.gitignore (vendored): 2 additions

@@ -12,3 +12,5 @@ vendor/*
 NimBinaries
 .update.timestamp
 *.dSYM
+.vscode/*
+nimbledeps

@@ -1,12 +1,12 @@
-# A DHT implementation for Codex
+# A DHT implementation for Logos Storage
 [![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
 [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 [![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
-[![CI (GitHub Actions)](https://github.com/status-im/nim-libp2p-dht/workflows/CI/badge.svg?branch=main)](https://github.com/status-im/nim-libp2p-dht/actions?query=workflow%3ACI+branch%3Amain)
+[![CI (GitHub Actions)](https://github.com/logos-storage/logos-storage-nim-dht/workflows/CI/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim-dht/actions/workflows/ci.yml?query=workflow%3ACI+branch%3Amaster)
-[![codecov](https://codecov.io/gh/status-im/nim-libp2p-dht/branch/main/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/status-im/nim-libp2p-dht)
+[![codecov](https://codecov.io/gh/logos-storage/logos-storage-nim-dht/branch/master/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/logos-storage/logos-storage-nim-dht)
-This DHT implementation is aiming to provide a DHT for Codex with the following properties
+This DHT implementation is aiming to provide a DHT for Logos Storage with the following properties
 * flexible secure transport usage with
   * fast UDP based operation
   * eventual fallback to TCP-based operation (maybe though libp2p)

@@ -1,143 +0,0 @@
-{
-  "items": {
-    "nimbus-build-system": {
-      "dir": "vendor/nimbus-build-system",
-      "url": "https://github.com/status-im/nimbus-build-system",
-      "commit": "239c3a7fbb88fd241da0ade3246fd2e5fcff4f25"
-    },
-    "nim-nat-traversal": {
-      "dir": "vendor/nim-nat-traversal",
-      "url": "https://github.com/status-im/nim-nat-traversal",
-      "commit": "802d75edcc656e616120fb27f950ff1285ddcbba"
-    },
-    "nim-zlib": {
-      "dir": "vendor/nim-zlib",
-      "url": "https://github.com/status-im/nim-zlib",
-      "commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93"
-    },
-    "nim-stew": {
-      "dir": "vendor/nim-stew",
-      "url": "https://github.com/status-im/nim-stew.git",
-      "commit": "e18f5a62af2ade7a1fd1d39635d4e04d944def08"
-    },
-    "nim-http-utils": {
-      "dir": "vendor/nim-http-utils",
-      "url": "https://github.com/status-im/nim-http-utils.git",
-      "commit": "3b491a40c60aad9e8d3407443f46f62511e63b18"
-    },
-    "nim-chronos": {
-      "dir": "vendor/nim-chronos",
-      "url": "https://github.com/status-im/nim-chronos.git",
-      "commit": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686"
-    },
-    "upraises": {
-      "dir": "vendor/upraises",
-      "url": "https://github.com/markspanbroek/upraises.git",
-      "commit": "bc2628989b63854d980e92dadbd58f83e34b6f25"
-    },
-    "nim-sqlite3-abi": {
-      "dir": "vendor/nim-sqlite3-abi",
-      "url": "https://github.com/arnetheduck/nim-sqlite3-abi.git",
-      "commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3"
-    },
-    "questionable": {
-      "dir": "vendor/questionable",
-      "url": "https://github.com/status-im/questionable.git",
-      "commit": "0d7ce8efdedaf184680cb7268721fca0af947a74"
-    },
-    "nim-websock": {
-      "dir": "vendor/nim-websock",
-      "url": "https://github.com/status-im/nim-websock.git",
-      "commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8"
-    },
-    "nim-secp256k1": {
-      "dir": "vendor/nim-secp256k1",
-      "url": "https://github.com/status-im/nim-secp256k1.git",
-      "commit": "5340cf188168d6afcafc8023770d880f067c0b2f"
-    },
-    "nim-bearssl": {
-      "dir": "vendor/nim-bearssl",
-      "url": "https://github.com/status-im/nim-bearssl.git",
-      "commit": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab"
-    },
-    "dnsclient.nim": {
-      "dir": "vendor/dnsclient.nim",
-      "url": "https://github.com/ba0f3/dnsclient.nim",
-      "commit": "23214235d4784d24aceed99bbfe153379ea557c8"
-    },
-    "nimcrypto": {
-      "dir": "vendor/nimcrypto",
-      "url": "https://github.com/status-im/nimcrypto.git",
-      "commit": "a5742a9a214ac33f91615f3862c7b099aec43b00"
-    },
-    "nim-json-serialization": {
-      "dir": "vendor/nim-json-serialization",
-      "url": "https://github.com/status-im/nim-json-serialization.git",
-      "commit": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4"
-    },
-    "nim-testutils": {
-      "dir": "vendor/nim-testutils",
-      "url": "https://github.com/status-im/nim-testutils",
-      "commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815"
-    },
-    "nim-unittest2": {
-      "dir": "vendor/nim-unittest2",
-      "url": "https://github.com/status-im/nim-unittest2.git",
-      "commit": "b178f47527074964f76c395ad0dfc81cf118f379"
-    },
-    "npeg": {
-      "dir": "vendor/npeg",
-      "url": "https://github.com/zevv/npeg",
-      "commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f"
-    },
-    "nim-serialization": {
-      "dir": "vendor/nim-serialization",
-      "url": "https://github.com/status-im/nim-serialization.git",
-      "commit": "493d18b8292fc03aa4f835fd825dea1183f97466"
-    },
-    "nim-faststreams": {
-      "dir": "vendor/nim-faststreams",
-      "url": "https://github.com/status-im/nim-faststreams.git",
-      "commit": "1b561a9e71b6bdad1c1cdff753418906037e9d09"
-    },
-    "nim-datastore": {
-      "dir": "vendor/nim-datastore",
-      "url": "https://github.com/codex-storage/nim-datastore.git",
-      "commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa"
-    },
-    "asynctest": {
-      "dir": "vendor/asynctest",
-      "url": "https://github.com/markspanbroek/asynctest",
-      "commit": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7"
-    },
-    "nim-stint": {
-      "dir": "vendor/nim-stint",
-      "url": "https://github.com/status-im/nim-stint.git",
-      "commit": "036c71d06a6b22f8f967ba9d54afd2189c3872ca"
-    },
-    "nim-metrics": {
-      "dir": "vendor/nim-metrics",
-      "url": "https://github.com/status-im/nim-metrics.git",
-      "commit": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5"
-    },
-    "nim-libp2p": {
-      "dir": "vendor/nim-libp2p",
-      "url": "https://github.com/status-im/nim-libp2p.git",
-      "commit": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702"
-    },
-    "nim-chronicles": {
-      "dir": "vendor/nim-chronicles",
-      "url": "https://github.com/status-im/nim-chronicles.git",
-      "commit": "7631f7b2ee03398cb1512a79923264e8f9410af6"
-    },
-    "nim-protobuf-serialization": {
-      "dir": "vendor/nim-protobuf-serialization",
-      "url": "https://github.com/status-im/nim-protobuf-serialization",
-      "commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6"
-    }
-  },
-  "nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n",
-  "nimVersion": "1.6.14",
-  "gccVersion": "",
-  "clangVersion": ""
-}

@@ -1,22 +0,0 @@
-coverage:
-  status:
-    project:
-      default:
-        # advanced settings
-        # Prevents PR from being blocked with a reduction in coverage.
-        # Note, if we want to re-enable this, a `threshold` value can be used
-        # allow coverage to drop by x% while still posting a success status.
-        # `informational`: https://docs.codecov.com/docs/commit-status#informational
-        # `threshold`: https://docs.codecov.com/docs/commit-status#threshold
-        informational: true
-    patch:
-      default:
-        # advanced settings
-        # Prevents PR from being blocked with a reduction in coverage.
-        # Note, if we want to re-enable this, a `threshold` value can be used
-        # allow coverage to drop by x% while still posting a success status.
-        # `informational`: https://docs.codecov.com/docs/commit-status#informational
-        # `threshold`: https://docs.codecov.com/docs/commit-status#threshold
-        informational: true

@@ -1,28 +1,42 @@
 # Package
-version = "0.3.2"
+version = "0.6.0"
 author = "Status Research & Development GmbH"
-description = "DHT based on the libp2p Kademlia spec"
+description = "DHT based on Eth discv5 implementation"
 license = "MIT"
 skipDirs = @["tests"]

 # Dependencies
-requires "nim >= 1.2.0"
-requires "secp256k1#b3f38e2795e805743b299dc5d96d332db375b520" # >= 0.5.2 & < 0.6.0
-requires "protobuf_serialization#27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a" # >= 0.2.0 & < 0.3.0
-requires "nimcrypto == 0.5.4"
-requires "bearssl#head"
-requires "chronicles >= 0.10.2 & < 0.11.0"
-requires "chronos#1394c9e04957928afc1db33d2e0965cfb677a1e0" # >= 3.0.11 & < 3.1.0
-requires "libp2p#unstable"
-requires "metrics"
-requires "stew#head"
-requires "stint"
-requires "asynctest >= 0.3.1 & < 0.4.0"
-requires "https://github.com/status-im/nim-datastore#head"
-requires "questionable"
-
-include "build.nims"
+requires "nim >= 2.2.4 & < 3.0.0"
+requires "secp256k1 >= 0.6.0 & < 0.7.0"
+requires "nimcrypto >= 0.6.2 & < 0.8.0"
+requires "bearssl >= 0.2.5 & < 0.3.0"
+requires "chronicles >= 0.11.2 & < 0.13.0"
+requires "chronos >= 4.0.4 & < 4.1.0"
+requires "libp2p >= 1.14.1 & < 2.0.0"
+requires "metrics >= 0.1.0 & < 0.2.0"
+requires "stew >= 0.4.2"
+requires "stint >= 0.8.1 & < 0.9.0"
+requires "https://github.com/logos-storage/nim-datastore >= 0.2.1 & < 0.3.0"
+requires "questionable >= 0.10.15 & < 0.11.0"
+requires "leveldbstatic >= 0.2.1 & < 0.3.0"
+
+task testAll, "Run all test suites":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testAll"
+
+task test, "Run the test suite":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble test"
+
+task testPart1, "Run the test suite part 1":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testPart1"
+
+task testPart2, "Run the test suite part 2":
+  exec "nimble install -d -y"
+  withDir "tests":
+    exec "nimble testPart2"

@@ -1,30 +1,104 @@
 import
   std/sugar,
-  libp2p/crypto/[crypto, secp]
+  libp2p/crypto/[crypto, secp],
+  stew/[byteutils, objects, ptrops],
+  results

-from secp256k1 import ecdhRaw, SkEcdhRawSecret, toRaw
+import secp256k1
+
+const
+  KeyLength* = secp256k1.SkEcdhSecretSize
+    ## Ecdh shared secret key length without leading byte
+    ## (publicKey * privateKey).x, where length of x is 32 bytes
+
+  FullKeyLength* = KeyLength + 1
+    ## Ecdh shared secret with leading byte 0x02 or 0x03
+
+type
+  SharedSecret* = object
+    ## Representation of ECDH shared secret, without leading `y` byte
+    data*: array[KeyLength, byte]
+
+  SharedSecretFull* = object
+    ## Representation of ECDH shared secret, with leading `y` byte
+    ## (`y` is 0x02 when (publicKey * privateKey).y is even or 0x03 when odd)
+    data*: array[FullKeyLength, byte]

 proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
-  let skKey = ? SkPrivateKey.init(data).mapErr(e =>
+  let skKey = ? secp.SkPrivateKey.init(data).mapErr(e =>
     ("Failed to init private key from hex string: " & $e).cstring)
   ok PrivateKey.init(skKey)

 proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
-  let skKey = ? SkPublicKey.init(data).mapErr(e =>
+  let skKey = ? secp.SkPublicKey.init(data).mapErr(e =>
     ("Failed to init public key from hex string: " & $e).cstring)
   ok PublicKey.init(skKey)

-func ecdhRaw*(seckey: SkPrivateKey, pubkey: SkPublicKey): SkEcdhRawSecret {.borrow.}
+proc ecdhSharedSecretHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
+    {.cdecl, raises: [].} =
+  ## Hash function used by `ecdhSharedSecret` below
+  ##
+  ## `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
+  ## Both `x32` and `y32` are 32 bytes length.
+  ##
+  ## Take the `x32` part as ecdh shared secret.
+  ## output length is derived from x32 length and taken from ecdh
+  ## generic parameter `KeyLength`
+  copyMem(output, x32, KeyLength)
+  return 1
+
+func ecdhSharedSecret(seckey: SkPrivateKey, pubkey: secp.SkPublicKey): SharedSecret =
+  ## Compute ecdh agreed shared secret.
+  let res = secp256k1.ecdh[KeyLength](
+    secp256k1.SkSecretKey(seckey),
+    secp256k1.SkPublicKey(pubkey),
+    ecdhSharedSecretHash,
+    nil,
+  )
+  # This function only fail if the hash function return zero.
+  # Because our hash function always success, we can turn the error into defect
+  doAssert res.isOk, $res.error
+  SharedSecret(data: res.get)
+
+proc toRaw*(pubkey: PublicKey): seq[byte] =
+  secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]
+
+proc ecdhSharedSecretFullHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
+    {.cdecl, raises: [].} =
+  ## Hash function used by `ecdhSharedSecretFull` below
+  # `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
+  # Leading byte is 0x02 if `y32` is even and 0x03 if odd. Then concat with `x32`.
+  # output length is derived from `x32` length + 1 and taken from ecdh
+  # generic parameter `FullKeyLength`
+  # output[0] = 0x02 | (y32[31] & 1)
+  output[] = 0x02 or (y32.offset(31)[] and 0x01)
+  copyMem(output.offset(1), x32, KeyLength)
+  return 1
+
+func ecdhSharedSecretFull*(seckey: PrivateKey, pubkey: PublicKey): SharedSecretFull =
+  ## Compute ecdh agreed shared secret with leading byte.
+  let res = ecdh[FullKeyLength](secp256k1.SkSecretKey(seckey.skkey),
+    secp256k1.SkPublicKey(pubkey.skkey),
+    ecdhSharedSecretFullHash, nil)
+  # This function only fail if the hash function return zero.
+  # Because our hash function always success, we can turn the error into defect
+  doAssert res.isOk, $res.error
+  SharedSecretFull(data: res.get)

 proc ecdhRaw*(
     priv: PrivateKey,
-    pub: PublicKey): Result[SkEcdhRawSecret, cstring] =
+    pub: PublicKey
+): Result[SharedSecretFull, cstring] =
+  ## emulate old ecdhRaw style keys
+  ##
+  ## this includes a leading 0x02 or 0x03
+  ##
   # TODO: Do we need to support non-secp256k1 schemes?
   if priv.scheme != Secp256k1 or pub.scheme != Secp256k1:
     return err "Must use secp256k1 scheme".cstring

-  ok ecdhRaw(priv.skkey, pub.skkey)
-
-proc toRaw*(pubkey: PublicKey): seq[byte] =
-  secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]
+  ok ecdhSharedSecretFull(priv, pub)

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -11,19 +11,21 @@
 ## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#sessions
 ##

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[hashes, net, options, sugar, tables],
+  stew/endians2,
   bearssl/rand,
   chronicles,
-  stew/[results, byteutils],
+  stew/[byteutils],
   stint,
   libp2p/crypto/crypto as libp2p_crypto,
   libp2p/crypto/secp,
   libp2p/signed_envelope,
   metrics,
   nimcrypto,
+  results,
   "."/[messages, messages_encoding, node, spr, hkdf, sessions],
   "."/crypto
@@ -32,13 +34,16 @@ from stew/objects import checkedEnumAssign
 export crypto

-declareCounter discovery_session_lru_cache_hits, "Session LRU cache hits"
-declareCounter discovery_session_lru_cache_misses, "Session LRU cache misses"
-declareCounter discovery_session_decrypt_failures, "Session decrypt failures"
+declareCounter dht_session_lru_cache_hits, "Session LRU cache hits"
+declareCounter dht_session_lru_cache_misses, "Session LRU cache misses"
+declareCounter dht_session_decrypt_failures, "Session decrypt failures"

 logScope:
   topics = "discv5"

+type
+  cipher = aes128
+
 const
   version: uint16 = 1
   idSignatureText = "discovery v5 identity proof"
@@ -161,7 +166,7 @@
   ok secrets

 proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] =
-  var ectx: GCM[aes128]
+  var ectx: GCM[cipher]
   ectx.init(key, nonce, authData)
   result = newSeq[byte](pt.len + gcmTagSize)
   ectx.encrypt(pt, result)
@@ -174,7 +179,7 @@
     debug "cipher is missing tag", len = ct.len
     return

-  var dctx: GCM[aes128]
+  var dctx: GCM[cipher]
   dctx.init(key, nonce, authData)
   var res = newSeq[byte](ct.len - gcmTagSize)
   var tag: array[gcmTagSize, byte]
@@ -188,7 +193,7 @@
   return some(res)

 proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] =
-  var ectx: CTR[aes128]
+  var ectx: CTR[cipher]
   ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv)
   result = newSeq[byte](header.len)
   ectx.encrypt(header, result)
@@ -200,7 +205,7 @@ proc hasHandshake*(c: Codec, key: HandshakeKey): bool =
 proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
     seq[byte] =
   result.add(protocolId)
-  result.add(version.toBytesBE())
+  result.add(endians2.toBytesBE(version))
   result.add(byte(flag))
   result.add(nonce)
   # TODO: assert on authSize of > 2^16?
@@ -208,8 +213,9 @@
 proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
     toId: NodeId, toAddr: Address, message: openArray[byte]):
-    (seq[byte], AESGCMNonce) =
+    (seq[byte], AESGCMNonce, bool) =
   var nonce: AESGCMNonce
+  var haskey: bool
   hmacDrbgGenerate(rng, nonce) # Random AESGCM nonce
   var iv: array[ivSize, byte]
   hmacDrbgGenerate(rng, iv) # Random IV
@@ -225,10 +231,11 @@
   # message
   var messageEncrypted: seq[byte]
-  var initiatorKey, recipientKey: AesKey
-  if c.sessions.load(toId, toAddr, recipientKey, initiatorKey):
+  var initiatorKey, recipientKey1, recipientKey2: AesKey
+  if c.sessions.load(toId, toAddr, recipientKey1, recipientKey2, initiatorKey):
+    haskey = true
     messageEncrypted = encryptGCM(initiatorKey, nonce, message, @iv & header)
-    discovery_session_lru_cache_hits.inc()
+    dht_session_lru_cache_hits.inc()
   else:
     # We might not have the node's keys if the handshake hasn't been performed
     # yet. That's fine, we send a random-packet and we will be responded with
@@ -237,10 +244,11 @@
     # message. 16 bytes for the gcm tag and 4 bytes for ping with requestId of
     # 1 byte (e.g "01c20101"). Could increase to 27 for 8 bytes requestId in
     # case this must not look like a random packet.
+    haskey = false
     var randomData: array[gcmTagSize + 4, byte]
     hmacDrbgGenerate(rng, randomData)
     messageEncrypted.add(randomData)
-    discovery_session_lru_cache_misses.inc()
+    dht_session_lru_cache_misses.inc()

   let maskedHeader = encryptHeader(toId, iv, header)
@@ -249,7 +257,7 @@
   packet.add(maskedHeader)
   packet.add(messageEncrypted)

-  return (packet, nonce)
+  return (packet, nonce, haskey)

 proc encodeWhoareyouPacket*(rng: var HmacDrbgContext, c: var Codec,
     toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
@@ -307,7 +315,7 @@
   authdataHead.add(c.localNode.id.toByteArrayBE())

-  let ephKeys = ? KeyPair.random(rng)
+  let ephKeys = ? KeyPair.random(PKScheme.Secp256k1, rng)
     .mapErr((e: CryptoError) =>
       ("Failed to create random key pair: " & $e).cstring)
@@ -370,7 +378,7 @@
     DecodeResult[(StaticHeader, seq[byte])] =
   # No need to check staticHeader size as that is included in minimum packet
   # size check in decodePacket
-  var ectx: CTR[aes128]
+  var ectx: CTR[cipher]
   ectx.init(id.toByteArrayBE().toOpenArray(0, aesKeySize - 1), iv)
   # Decrypt static-header part of the header
   var staticHeader = newSeq[byte](staticHeaderSize)
@@ -419,26 +427,35 @@
   let srcId = NodeId.fromBytesBE(header.toOpenArray(staticHeaderSize,
     header.high))

-  var initiatorKey, recipientKey: AesKey
-  if not c.sessions.load(srcId, fromAddr, recipientKey, initiatorKey):
+  var initiatorKey, recipientKey1, recipientKey2: AesKey
+  if not c.sessions.load(srcId, fromAddr, recipientKey1, recipientKey2, initiatorKey):
     # Don't consider this an error, simply haven't done a handshake yet or
     # the session got removed.
     trace "Decrypting failed (no keys)"
-    discovery_session_lru_cache_misses.inc()
+    dht_session_lru_cache_misses.inc()
     return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
       srcId: srcId))

-  discovery_session_lru_cache_hits.inc()
+  dht_session_lru_cache_hits.inc()

-  let pt = decryptGCM(recipientKey, nonce, ct, @iv & @header)
+  var pt = decryptGCM(recipientKey2, nonce, ct, @iv & @header)
   if pt.isNone():
-    # Don't consider this an error, the session got probably removed at the
-    # peer's side and a random message is send.
-    trace "Decrypting failed (invalid keys)"
-    c.sessions.del(srcId, fromAddr)
-    discovery_session_decrypt_failures.inc()
-    return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
-      srcId: srcId))
+    trace "Decrypting failed, trying other key"
+    pt = decryptGCM(recipientKey1, nonce, ct, @iv & @header)
+    if pt.isNone():
+      # Don't consider this an error, the session got probably removed at the
+      # peer's side and a random message is send.
+      # This might also be a cross-connect. Not deleting key, as it might be
+      # needed later, depending on message order.
+      trace "Decrypting failed (invalid keys)", address = fromAddr
+      #c.sessions.del(srcId, fromAddr)
+      dht_session_decrypt_failures.inc()
+      return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
+        srcId: srcId))
+    # Most probably the same decryption key will work next time. We should
+    # elevate it's priority.
+    c.sessions.swapr(srcId, fromAddr)

   let message = ? decodeMessage(pt.get())

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2021 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -15,7 +15,7 @@
 ## To select the right address, a majority count is done. This is done over a
 ## sort of moving window as votes expire after `IpVoteTimeout`.

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[tables, options],

@@ -1,6 +1,6 @@
 import std/[tables, lists, options]

-{.push raises: [Defect].}
+{.push raises: [].}

 export tables, lists, options
@@ -55,3 +55,10 @@ iterator items*[K, V](lru: LRUCache[K, V]): V =
   for item in lru.list:
     yield item[1]
+
+iterator keys*[K, V](lru: LRUCache[K, V]): K =
+  ## Get cached keys - this doesn't touch the cache
+  ##
+  for item in lru.table.keys:
+    yield item

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -10,7 +10,7 @@
 ## These messages get protobuf encoded, while in the spec they get RLP encoded.
 ##

-{.push raises: [Defect].}
+{.push raises: [].}

 import
   std/[hashes, net],

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
 # Copyright (c) 2020-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -11,8 +11,10 @@
 import
   std/net,
   chronicles,
+  stew/endians2,
   libp2p/routing_record,
   libp2p/signed_envelope,
+  libp2p/protobuf/minprotobuf,
   "."/[messages, spr, node],
   ../../../../dht/providers_encoding
@@ -98,7 +100,7 @@ proc getField*(pb: ProtoBuffer, field: int,
   if not(res):
     ok(false)
   else:
-    family = uint8.fromBytesBE(buffer).IpAddressFamily
+    family = endians2.fromBytesBE(uint8, buffer).IpAddressFamily
     ok(true)

 proc write*(pb: var ProtoBuffer, field: int, family: IpAddressFamily) =
@@ -324,7 +326,7 @@ proc encodeMessage*[T: SomeMessage](p: T, reqId: RequestId): seq[byte] =
   pb.write(2, encoded)
   pb.finish()
   result.add(pb.buffer)
-  trace "Encoded protobuf message", typ = $T, encoded
+  trace "Encoded protobuf message", typ = $T

 proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] =
   ## Decodes to the specific `Message` type.

View File

@ -1,40 +1,51 @@
# codex-dht - Codex DHT # logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH # Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of # Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms. # at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].} {.push raises: [].}
import import
std/hashes, std/[hashes, net],
bearssl/rand, bearssl/rand,
chronicles, chronicles,
chronos, chronos,
nimcrypto, nimcrypto,
stew/shims/net,
stint, stint,
./crypto, ./crypto,
./spr ./spr
export stint export stint
const
avgSmoothingFactor = 0.9
seenSmoothingFactor = 0.9
type type
NodeId* = UInt256 NodeId* = UInt256
Address* = object Address* = object
ip*: ValidIpAddress ip*: IpAddress
port*: Port port*: Port
Stats* = object
rttMin*: float #millisec
rttAvg*: float #millisec
bwAvg*: float #bps
bwMax*: float #bps
Node* = ref object Node* = ref object
id*: NodeId id*: NodeId
pubkey*: PublicKey pubkey*: PublicKey
address*: Option[Address] address*: Option[Address]
record*: SignedPeerRecord record*: SignedPeerRecord
seen*: bool ## Indicates if there was at least one successful seen*: float ## Indicates if there was at least one successful
## request-response with this node, or if the nde was verified ## request-response with this node, or if the nde was verified
## through the underlying transport mechanisms. ## through the underlying transport mechanisms. After first contact
## it tracks how reliable is the communication with the node.
stats*: Stats # traffic measurements and statistics
func toNodeId*(pid: PeerId): NodeId = func toNodeId*(pid: PeerId): NodeId =
## Convert public key to a node identifier. ## Convert public key to a node identifier.
@ -57,7 +68,7 @@ func newNode*(
id: ? pk.toNodeId(), id: ? pk.toNodeId(),
pubkey: pk, pubkey: pk,
record: record, record: record,
address: Address(ip: ValidIpAddress.init(ip), port: port).some) address: Address(ip: ip, port: port).some)
ok node ok node
@ -77,7 +88,9 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
nodeId = ? pk.get().toNodeId() nodeId = ? pk.get().toNodeId()
if r.ip.isSome() and r.udp.isSome(): if r.ip.isSome() and r.udp.isSome():
let a = Address(ip: ipv4(r.ip.get()), port: Port(r.udp.get())) let a = Address(
ip: IpAddress(family: IPv4, address_v4: r.ip.get()), port: Port(r.udp.get())
)
ok(Node( ok(Node(
id: nodeId, id: nodeId,
@ -91,7 +104,7 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
record: r, record: r,
address: none(Address))) address: none(Address)))
proc update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress], proc update*(n: Node, pk: PrivateKey, ip: Option[IpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()): Result[void, cstring] = tcpPort, udpPort: Option[Port] = none[Port]()): Result[void, cstring] =
? n.record.update(pk, ip, tcpPort, udpPort) ? n.record.update(pk, ip, tcpPort, udpPort)
@ -135,14 +148,14 @@ func shortLog*(id: NodeId): string =
result = sid result = sid
else: else:
result = newStringOfCap(10) result = newStringOfCap(10)
for i in 0..<2: for i in 0..<3:
result.add(sid[i]) result.add(sid[i])
result.add("*") result.add("*")
for i in (len(sid) - 6)..sid.high: for i in (len(sid) - 6)..sid.high:
result.add(sid[i]) result.add(sid[i])
chronicles.formatIt(NodeId): shortLog(it) chronicles.formatIt(NodeId): shortLog(it)
func hash*(ip: ValidIpAddress): Hash = func hash*(ip: IpAddress): Hash =
case ip.family case ip.family
of IpAddressFamily.IPv6: hash(ip.address_v6) of IpAddressFamily.IPv6: hash(ip.address_v6)
of IpAddressFamily.IPv4: hash(ip.address_v4) of IpAddressFamily.IPv4: hash(ip.address_v4)
@ -177,3 +190,38 @@ func shortLog*(nodes: seq[Node]): string =
result.add("]") result.add("]")
chronicles.formatIt(seq[Node]): shortLog(it) chronicles.formatIt(seq[Node]): shortLog(it)
func shortLog*(address: Address): string =
$address
chronicles.formatIt(Address): shortLog(it)
func registerSeen*(n:Node, seen = true) =
## Register event of seeing (getting message from) or not seeing (missing message) node
## Note: interpretation might depend on NAT type
if n.seen == 0: # first time seeing the node
n.seen = 1
else:
n.seen = seenSmoothingFactor * n.seen + (1.0 - seenSmoothingFactor) * seen.float
func alreadySeen*(n:Node) : bool =
## Was the node seen at least once?
n.seen > 0
+# collecting performance metrics
+
+func registerRtt*(n: Node, rtt: Duration) =
+  ## Register an RTT measurement
+  let rttMs = rtt.nanoseconds.float / 1e6
+  n.stats.rttMin =
+    if n.stats.rttMin == 0: rttMs
+    else: min(n.stats.rttMin, rttMs)
+  n.stats.rttAvg =
+    if n.stats.rttAvg == 0: rttMs
+    else: avgSmoothingFactor * n.stats.rttAvg + (1.0 - avgSmoothingFactor) * rttMs
+
+func registerBw*(n: Node, bw: float) =
+  ## Register a bandwidth measurement
+  n.stats.bwMax = max(n.stats.bwMax, bw)
+  n.stats.bwAvg =
+    if n.stats.bwAvg == 0: bw
+    else: avgSmoothingFactor * n.stats.bwAvg + (1.0 - avgSmoothingFactor) * bw
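
The RTT statistics combine a floor (`rttMin`) with the same smoothing. A hypothetical trace, again assuming `avgSmoothingFactor = 0.9`:

const avgSmoothingFactor = 0.9 # assumed value, for illustration only

var rttMin, rttAvg = 0.0
for rttMs in [40.0, 20.0, 60.0]: # three measurements, in milliseconds
  rttMin =
    if rttMin == 0: rttMs
    else: min(rttMin, rttMs)
  rttAvg =
    if rttAvg == 0: rttMs
    else: avgSmoothingFactor * rttAvg + (1.0 - avgSmoothingFactor) * rttMs

echo rttMin # 20.0: best RTT observed so far
echo rttAvg # 40.2: 0.9 * 38.0 + 0.1 * 60.0, a smoothed recent average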

View File

@@ -1,8 +1,8 @@
-{.push raises: [Defect].}
+{.push raises: [].}

import
-  std/[sets, options],
-  stew/results, stew/shims/net, chronicles, chronos,
+  std/[net, sets, options],
+  results, chronicles, chronos,
  "."/[node, spr, routing_table]

logScope:

View File

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -71,18 +71,18 @@
## more requests will be needed for a lookup (adding bandwidth and latency).
## This might be a concern for mobile devices.

-{.push raises: [Defect].}
+{.push raises: [].}

import
-  std/[tables, sets, options, math, sequtils, algorithm, strutils],
-  stew/shims/net as stewNet,
+  std/[net, tables, sets, options, math, sequtils, algorithm, strutils],
  json_serialization/std/net,
-  stew/[base64, endians2, results],
+  stew/[base64, endians2],
  pkg/[chronicles, chronicles/chronos_tools],
  pkg/chronos,
  pkg/stint,
  pkg/bearssl/rand,
-  pkg/metrics
+  pkg/metrics,
+  pkg/results

import "."/[
  messages,
@@ -100,13 +100,13 @@ import nimcrypto except toHex
export options, results, node, spr, providers

-declareCounter discovery_message_requests_outgoing,
+declareCounter dht_message_requests_outgoing,
  "Discovery protocol outgoing message requests", labels = ["response"]
-declareCounter discovery_message_requests_incoming,
+declareCounter dht_message_requests_incoming,
  "Discovery protocol incoming message requests", labels = ["response"]
-declareCounter discovery_unsolicited_messages,
+declareCounter dht_unsolicited_messages,
  "Discovery protocol unsolicited or timed-out messages"
-declareCounter discovery_enr_auto_update,
+declareCounter dht_enr_auto_update,
  "Amount of discovery IP:port address SPR auto updates"

logScope:
@@ -117,6 +117,7 @@ const
  LookupRequestLimit = 3 ## Amount of distances requested in a single Findnode
                         ## message for a lookup or query
  FindNodeResultLimit = 16 ## Maximum amount of SPRs in the total Nodes messages
+  FindNodeFastResultLimit = 6 ## Maximum amount of SPRs in response to findNodeFast
                           ## that will be processed
  MaxNodesPerMessage = 3 ## Maximum amount of SPRs per individual Nodes message
  RefreshInterval = 5.minutes ## Interval of launching a random query to
@@ -125,12 +126,17 @@ const
  RevalidateMax = 10000 ## Revalidation of a peer is done between min and max milliseconds.
                        ## value in milliseconds
  IpMajorityInterval = 5.minutes ## Interval for checking the latest IP:Port
+  DebugPrintInterval = 5.minutes ## Interval to print neighborhood with stats
                                 ## majority and updating this when SPR auto update is set.
  InitialLookups = 1 ## Amount of lookups done when populating the routing table
-  ResponseTimeout* = 4.seconds ## timeout for the response of a request-response
+  ResponseTimeout* = 1.seconds ## timeout for the response of a request-response
  MaxProvidersEntries* = 1_000_000 # one million records
  MaxProvidersPerEntry* = 20 # providers per entry
                             ## call
+  FindnodeSeenThreshold = 1.0 ## threshold used as findnode response filter
+  LookupSeenThreshold = 0.0 ## threshold used for lookup nodeset selection
+  QuerySeenThreshold = 0.0 ## threshold used for query nodeset selection
+  NoreplyRemoveThreshold = 0.5 ## remove node on no reply if 'seen' is below this value
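
All four thresholds compare against the smoothed `seen` score from node.nim and replace the old boolean `seenOnly` switch. A sketch of the assumed semantics (the actual checks live in `routing_table.neighbours` and `replaceNode`):

# Assumed semantics, for illustration only.
proc qualifies(seenScore, threshold: float): bool =
  seenScore >= threshold

doAssert qualifies(1.0, 1.0)     # FindnodeSeenThreshold: answer findNode only from fully reliable peers
doAssert qualifies(0.0, 0.0)     # Lookup/QuerySeenThreshold: lookups may still try unverified peers
doAssert not qualifies(0.4, 0.5) # NoreplyRemoveThreshold: a degraded peer may be dropped after a lost reply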
func shortLog*(record: SignedPeerRecord): string =
  ## Returns compact string representation of ``SignedPeerRecord``.
@@ -166,6 +172,7 @@ type
    refreshLoop: Future[void]
    revalidateLoop: Future[void]
    ipMajorityLoop: Future[void]
+   debugPrintLoop: Future[void]
    lastLookup: chronos.Moment
    bootstrapRecords*: seq[SignedPeerRecord]
    ipVote: IpVote
@@ -182,6 +189,9 @@ type
  DiscResult*[T] = Result[T, cstring]

+func `$`*(p: Protocol): string =
+  $p.localNode.id
+
const
  defaultDiscoveryConfig* = DiscoveryConfig(
    tableIpLimits: DefaultTableIpLimits,
@@ -231,7 +241,7 @@ proc randomNodes*(d: Protocol, maxAmount: int): seq[Node] =
  d.routingTable.randomNodes(maxAmount)

proc randomNodes*(d: Protocol, maxAmount: int,
-   pred: proc(x: Node): bool {.gcsafe, noSideEffect.}): seq[Node] =
+   pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].}): seq[Node] =
  ## Get a `maxAmount` of random nodes from the local routing table with the
  ## `pred` predicate function applied as filter on the nodes selected.
  d.routingTable.randomNodes(maxAmount, pred)
@@ -243,14 +253,14 @@ proc randomNodes*(d: Protocol, maxAmount: int,
  d.randomNodes(maxAmount, proc(x: Node): bool = x.record.contains(enrField))

proc neighbours*(d: Protocol, id: NodeId, k: int = BUCKET_SIZE,
-   seenOnly = false): seq[Node] =
+   seenThreshold = 0.0): seq[Node] =
  ## Return up to k neighbours (closest node ids) of the given node id.
-  d.routingTable.neighbours(id, k, seenOnly)
+  d.routingTable.neighbours(id, k, seenThreshold)

proc neighboursAtDistances*(d: Protocol, distances: seq[uint16],
-   k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
+   k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
  ## Return up to k neighbours (closest node ids) at given distances.
-  d.routingTable.neighboursAtDistances(distances, k, seenOnly)
+  d.routingTable.neighboursAtDistances(distances, k, seenThreshold)

proc nodesDiscovered*(d: Protocol): int = d.routingTable.len

@@ -272,7 +282,7 @@ proc updateRecord*(
    newSpr = spr.get()
    seqNo = d.localNode.record.seqNum

-  info "Updated discovery SPR", uri = newSpr.toURI()
+  info "Updated discovery SPR", uri = newSpr.toURI(), newSpr = newSpr.data

  d.localNode.record = newSpr
  d.localNode.record.data.seqNo = seqNo
@@ -338,7 +348,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
  # TODO: Still deduplicate also?
  if fn.distances.all(proc (x: uint16): bool = return x <= 256):
    d.sendNodes(fromId, fromAddr, reqId,
-     d.routingTable.neighboursAtDistances(fn.distances, seenOnly = true))
+     d.routingTable.neighboursAtDistances(fn.distances, FindNodeResultLimit, FindnodeSeenThreshold))
  else:
    # At least one invalid distance, but the polite node we are, still respond
    # with empty nodes.
@@ -347,7 +357,7 @@ proc handleFindNodeFast(d: Protocol, fromId: NodeId, fromAddr: Address,
    fnf: FindNodeFastMessage, reqId: RequestId) =
  d.sendNodes(fromId, fromAddr, reqId,
-   d.routingTable.neighbours(fnf.target, seenOnly = true))
+   d.routingTable.neighbours(fnf.target, FindNodeFastResultLimit, FindnodeSeenThreshold))
  # TODO: if known, maybe we should add exact target even if not yet "seen"

proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
@@ -369,7 +379,7 @@ proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,

proc addProviderLocal(p: Protocol, cId: NodeId, prov: SignedPeerRecord) {.async.} =
  trace "adding provider to local db", n = p.localNode, cId, prov
-  if (let res = (await p.providers.add(cid, prov)); res.isErr):
+  if (let res = (await p.providers.add(cId, prov)); res.isErr):
    trace "Unable to add provider", cid, peerId = prov.data.peerId

proc handleAddProvider(
@@ -403,27 +413,27 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
    message: Message) =
  case message.kind
  of ping:
-   discovery_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc()
    d.handlePing(srcId, fromAddr, message.ping, message.reqId)
  of findNode:
-   discovery_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc()
    d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId)
  of findNodeFast:
-   discovery_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc()
    d.handleFindNodeFast(srcId, fromAddr, message.findNodeFast, message.reqId)
  of talkReq:
-   discovery_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc()
    d.handleTalkReq(srcId, fromAddr, message.talkReq, message.reqId)
  of addProvider:
-   discovery_message_requests_incoming.inc()
-   discovery_message_requests_incoming.inc(labelValues = ["no_response"])
+   dht_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc(labelValues = ["no_response"])
    d.handleAddProvider(srcId, fromAddr, message.addProvider, message.reqId)
  of getProviders:
-   discovery_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc()
    asyncSpawn d.handleGetProviders(srcId, fromAddr, message.getProviders, message.reqId)
  of regTopic, topicQuery:
-   discovery_message_requests_incoming.inc()
-   discovery_message_requests_incoming.inc(labelValues = ["no_response"])
+   dht_message_requests_incoming.inc()
+   dht_message_requests_incoming.inc(labelValues = ["no_response"])
    trace "Received unimplemented message kind", kind = message.kind,
      origin = fromAddr
  else:
@@ -431,7 +441,7 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
    if d.awaitedMessages.take((srcId, message.reqId), waiter):
      waiter.complete(some(message))
    else:
-     discovery_unsolicited_messages.inc()
+     dht_unsolicited_messages.inc()
      trace "Timed out or unrequested message", kind = message.kind,
        origin = fromAddr
@@ -443,27 +453,50 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],
  else:
    ok()

-proc replaceNode(d: Protocol, n: Node) =
+proc replaceNode(d: Protocol, n: Node, forceRemoveBelow = 1.0) =
  if n.record notin d.bootstrapRecords:
-   d.routingTable.replaceNode(n)
+   d.routingTable.replaceNode(n, forceRemoveBelow)
  else:
    # For now we never remove bootstrap nodes. It might make sense to actually
    # do so and to retry them only in case we drop to a really low amount of
    # peers in the routing table.
    debug "Message request to bootstrap node failed", src=d.localNode, dst=n

+proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T,
+    reqId: RequestId) =
+  doAssert(toNode.address.isSome())
+  let
+    message = encodeMessage(m, reqId)
+
+  trace "Send message packet", dstId = toNode.id,
+    address = toNode.address, kind = messageKind(T)
+  dht_message_requests_outgoing.inc()
+
+  d.transport.sendMessage(toNode, message)
+
+proc waitResponse*[T: SomeMessage](d: Protocol, node: Node, msg: T):
+    Future[Option[Message]] =
+  let reqId = RequestId.init(d.rng[])
+  result = d.waitMessage(node, reqId)
+  sendRequest(d, node, msg, reqId)
+
-proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId):
+proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId, timeout = ResponseTimeout):
    Future[Option[Message]] =
  result = newFuture[Option[Message]]("waitMessage")
  let res = result
  let key = (fromNode.id, reqId)
-  sleepAsync(ResponseTimeout).addCallback() do(data: pointer):
+  sleepAsync(timeout).addCallback() do(data: pointer):
    d.awaitedMessages.del(key)
    if not res.finished:
      res.complete(none(Message))

  d.awaitedMessages[key] = result

+proc waitNodeResponses*[T: SomeMessage](d: Protocol, node: Node, msg: T):
+    Future[DiscResult[seq[SignedPeerRecord]]] =
+  let reqId = RequestId.init(d.rng[])
+  result = d.waitNodes(node, reqId)
+  sendRequest(d, node, msg, reqId)
+
proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
    Future[DiscResult[seq[SignedPeerRecord]]] {.async.} =
  ## Wait for one or more nodes replies.
@@ -472,72 +505,70 @@ proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
  ## on that, more replies will be awaited.
  ## If one reply is lost here (timed out), others are ignored too.
  ## Same counts for out of order receival.
+  let startTime = Moment.now()
  var op = await d.waitMessage(fromNode, reqId)
  if op.isSome:
    if op.get.kind == MessageKind.nodes:
      var res = op.get.nodes.sprs
-     let total = op.get.nodes.total
+     let
+       total = op.get.nodes.total
+       firstTime = Moment.now()
+       rtt = firstTime - startTime
+     # trace "nodes RTT:", rtt, node = fromNode
+     fromNode.registerRtt(rtt)
      for i in 1 ..< total:
        op = await d.waitMessage(fromNode, reqId)
        if op.isSome and op.get.kind == MessageKind.nodes:
          res.add(op.get.nodes.sprs)
+         # Estimate bandwidth based on UDP packet train received, assuming these were
+         # released fast and spaced in time by bandwidth bottleneck. This is just a rough
+         # packet-pair based estimate, far from being perfect.
+         # TODO: get message size from lower layer for better bandwidth estimate
+         # TODO: get better reception timestamp from lower layers
+         let
+           deltaT = Moment.now() - firstTime
+           bwBps = 500.0 * 8.0 / (deltaT.nanoseconds.float / i.float / 1e9)
+         # trace "bw estimate:", deltaT = deltaT, i, bw_mbps = bwBps / 1e6, node = fromNode
+         fromNode.registerBw(bwBps)
        else:
          # No error on this as we received some nodes.
          break
      return ok(res)
    else:
-     discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+     dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
      return err("Invalid response to find node message")
  else:
-   discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+   dht_message_requests_outgoing.inc(labelValues = ["no_response"])
    return err("Nodes message not received in time")
-proc sendRequest*[T: SomeMessage](d: Protocol, toId: NodeId, toAddr: Address, m: T):
-    RequestId =
-  let
-    reqId = RequestId.init(d.rng[])
-    message = encodeMessage(m, reqId)
-
-  trace "Send message packet", dstId = toId, toAddr, kind = messageKind(T)
-  discovery_message_requests_outgoing.inc()
-
-  d.transport.sendMessage(toId, toAddr, message)
-  return reqId
-
-proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T):
-    RequestId =
-  doAssert(toNode.address.isSome())
-  let
-    reqId = RequestId.init(d.rng[])
-    message = encodeMessage(m, reqId)
-
-  trace "Send message packet", dstId = toNode.id,
-    address = toNode.address, kind = messageKind(T)
-  discovery_message_requests_outgoing.inc()
-
-  d.transport.sendMessage(toNode, message)
-  return reqId
-
proc ping*(d: Protocol, toNode: Node):
    Future[DiscResult[PongMessage]] {.async.} =
  ## Send a discovery ping message.
  ##
  ## Returns the received pong message or an error.
-  let reqId = d.sendRequest(toNode,
-    PingMessage(sprSeq: d.localNode.record.seqNum))
-  let resp = await d.waitMessage(toNode, reqId)
+  let
+    msg = PingMessage(sprSeq: d.localNode.record.seqNum)
+    startTime = Moment.now()
+    resp = await d.waitResponse(toNode, msg)
+    rtt = Moment.now() - startTime
+
+  # trace "ping RTT:", rtt, node = toNode
+  toNode.registerRtt(rtt)
+  d.routingTable.setJustSeen(toNode, resp.isSome())

  if resp.isSome():
    if resp.get().kind == pong:
-     d.routingTable.setJustSeen(toNode)
      return ok(resp.get().pong)
    else:
      d.replaceNode(toNode)
-     discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+     dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
      return err("Invalid response to ping message")
  else:
-   d.replaceNode(toNode)
-   discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+   # A ping (or the pong) was lost; what should we do? The previous implementation called
+   # d.replaceNode(toNode) immediately, which removed the node. This is too aggressive,
+   # especially if we have a temporary network outage. Although bootstrap nodes are protected
+   # from being removed, everything else would slowly be removed.
+   d.replaceNode(toNode, NoreplyRemoveThreshold)
+   dht_message_requests_outgoing.inc(labelValues = ["no_response"])
    return err("Pong message not received in time")
proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
@@ -546,12 +577,13 @@ proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
  ##
  ## Returns the received nodes or an error.
  ## Received SPRs are already validated and converted to `Node`.
-  let reqId = d.sendRequest(toNode, FindNodeMessage(distances: distances))
-  let nodes = await d.waitNodes(toNode, reqId)
+  let
+    msg = FindNodeMessage(distances: distances)
+    nodes = await d.waitNodeResponses(toNode, msg)
+
+  d.routingTable.setJustSeen(toNode, nodes.isOk)
  if nodes.isOk:
    let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit, distances)
-   d.routingTable.setJustSeen(toNode)
    return ok(res)
  else:
    trace "findNode nodes not OK."
@@ -564,12 +596,13 @@ proc findNodeFast*(d: Protocol, toNode: Node, target: NodeId):
  ##
  ## Returns the received nodes or an error.
  ## Received SPRs are already validated and converted to `Node`.
-  let reqId = d.sendRequest(toNode, FindNodeFastMessage(target: target))
-  let nodes = await d.waitNodes(toNode, reqId)
+  let
+    msg = FindNodeFastMessage(target: target)
+    nodes = await d.waitNodeResponses(toNode, msg)
+
+  d.routingTable.setJustSeen(toNode, nodes.isOk)
  if nodes.isOk:
-   let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit)
-   d.routingTable.setJustSeen(toNode)
+   let res = verifyNodesRecords(nodes.get(), toNode, FindNodeFastResultLimit)
    return ok(res)
  else:
    d.replaceNode(toNode)
@@ -581,21 +614,26 @@ proc talkReq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
  ## Send a discovery talkreq message.
  ##
  ## Returns the received talkresp message or an error.
-  let reqId = d.sendRequest(toNode,
-    TalkReqMessage(protocol: protocol, request: request))
-  let resp = await d.waitMessage(toNode, reqId)
+  let
+    msg = TalkReqMessage(protocol: protocol, request: request)
+    startTime = Moment.now()
+    resp = await d.waitResponse(toNode, msg)
+    rtt = Moment.now() - startTime
+
+  # trace "talk RTT:", rtt, node = toNode
+  toNode.registerRtt(rtt)
+  d.routingTable.setJustSeen(toNode, resp.isSome())

  if resp.isSome():
    if resp.get().kind == talkResp:
-     d.routingTable.setJustSeen(toNode)
      return ok(resp.get().talkResp.response)
    else:
      d.replaceNode(toNode)
-     discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+     dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
      return err("Invalid response to talk request message")
  else:
-   d.replaceNode(toNode)
-   discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+   # remove on loss only if there is a replacement
+   d.replaceNode(toNode, NoreplyRemoveThreshold)
+   dht_message_requests_outgoing.inc(labelValues = ["no_response"])
    return err("Talk response message not received in time")

proc lookupDistances*(target, dest: NodeId): seq[uint16] =
@@ -610,25 +648,18 @@ proc lookupDistances*(target, dest: NodeId): seq[uint16] =
      result.add(td - uint16(i))
    inc i

-proc lookupWorker(d: Protocol, destNode: Node, target: NodeId):
+proc lookupWorker(d: Protocol, destNode: Node, target: NodeId, fast: bool):
    Future[seq[Node]] {.async.} =
-  let dists = lookupDistances(target, destNode.id)
-
-  # Instead of doing max `LookupRequestLimit` findNode requests, make use
-  # of the discv5.1 functionality to request nodes for multiple distances.
-  let r = await d.findNode(destNode, dists)
-  if r.isOk:
-    result.add(r[])
-
-  # Attempt to add all nodes discovered
-  for n in result:
-    discard d.addNode(n)
-
-proc lookupWorkerFast(d: Protocol, destNode: Node, target: NodeId):
-    Future[seq[Node]] {.async.} =
-  ## use terget NodeId based find_node
-  let r = await d.findNodeFast(destNode, target)
+  let r =
+    if fast:
+      await d.findNodeFast(destNode, target)
+    else:
+      # Instead of doing max `LookupRequestLimit` findNode requests, make use
+      # of the discv5.1 functionality to request nodes for multiple distances.
+      let dists = lookupDistances(target, destNode.id)
+      await d.findNode(destNode, dists)
+
  if r.isOk:
    result.add(r[])

@@ -642,7 +673,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
  # `closestNodes` holds the k closest nodes to target found, sorted by distance
  # Unvalidated nodes are used for requests as a form of validation.
  var closestNodes = d.routingTable.neighbours(target, BUCKET_SIZE,
-   seenOnly = false)
+   LookupSeenThreshold)

  var asked, seen = initHashSet[NodeId]()
  asked.incl(d.localNode.id) # No need to ask our own node
@@ -659,10 +690,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
    while i < closestNodes.len and pendingQueries.len < Alpha:
      let n = closestNodes[i]
      if not asked.containsOrIncl(n.id):
-       if fast:
-         pendingQueries.add(d.lookupWorkerFast(n, target))
-       else:
-         pendingQueries.add(d.lookupWorker(n, target))
+       pendingQueries.add(d.lookupWorker(n, target, fast))
      inc i

  trace "discv5 pending queries", total = pendingQueries.len
@@ -707,7 +735,8 @@ proc addProvider*(
    res.add(d.localNode)
  for toNode in res:
    if toNode != d.localNode:
-     discard d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr))
+     let reqId = RequestId.init(d.rng[])
+     d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr), reqId)
    else:
      asyncSpawn d.addProviderLocal(cId, pr)

@@ -720,22 +749,21 @@ proc sendGetProviders(d: Protocol, toNode: Node,
  trace "sendGetProviders", toNode, msg

  let
-   reqId = d.sendRequest(toNode, msg)
-   resp = await d.waitMessage(toNode, reqId)
+   resp = await d.waitResponse(toNode, msg)

+  d.routingTable.setJustSeen(toNode, resp.isSome())
  if resp.isSome():
    if resp.get().kind == MessageKind.providers:
-     d.routingTable.setJustSeen(toNode)
      return ok(resp.get().provs)
    else:
      # TODO: do we need to do something when there is an invalid response?
      d.replaceNode(toNode)
-     discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
+     dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
      return err("Invalid response to GetProviders message")
  else:
-   # TODO: do we need to do something when there is no response?
-   d.replaceNode(toNode)
-   discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
+   # remove on loss only if there is a replacement
+   d.replaceNode(toNode, NoreplyRemoveThreshold)
+   dht_message_requests_outgoing.inc(labelValues = ["no_response"])
    return err("GetProviders response message not received in time")

proc getProvidersLocal*(
@@ -808,7 +836,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
  ## This will take k nodes from the routing table closest to target and
  ## query them for nodes closest to target. If there are less than k nodes in
  ## the routing table, nodes returned by the first queries will be used.
-  var queryBuffer = d.routingTable.neighbours(target, k, seenOnly = false)
+  var queryBuffer = d.routingTable.neighbours(target, k, QuerySeenThreshold)

  var asked, seen = initHashSet[NodeId]()
  asked.incl(d.localNode.id) # No need to ask our own node
@@ -823,7 +851,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
    while i < min(queryBuffer.len, k) and pendingQueries.len < Alpha:
      let n = queryBuffer[i]
      if not asked.containsOrIncl(n.id):
-       pendingQueries.add(d.lookupWorker(n, target))
+       pendingQueries.add(d.lookupWorker(n, target, false))
      inc i

  trace "discv5 pending queries", total = pendingQueries.len
@@ -934,7 +962,8 @@ proc revalidateNode*(d: Protocol, n: Node) {.async.} =
      discard d.addNode(nodes[][0])

  # Get IP and port from pong message and add it to the ip votes
-  let a = Address(ip: ValidIpAddress.init(res.ip), port: Port(res.port))
+  trace "pong rx", n, myip = res.ip, myport = res.port
+  let a = Address(ip: res.ip, port: Port(res.port))
  d.ipVote.insert(n.id, a)

proc revalidateLoop(d: Protocol) {.async.} =
@@ -1004,7 +1033,7 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
        warn "Failed updating SPR with newly discovered external address",
          majority, previous, error = res.error
      else:
-       discovery_enr_auto_update.inc()
+       dht_enr_auto_update.inc()
        info "Updated SPR with newly discovered external address",
          majority, previous, uri = toURI(d.localNode.record)
    else:
@@ -1019,6 +1048,19 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
      trace "ipMajorityLoop canceled"
  trace "ipMajorityLoop exited!"

+proc debugPrintLoop(d: Protocol) {.async.} =
+  ## Loop which prints the neighborhood with stats
+  while true:
+    await sleepAsync(DebugPrintInterval)
+    for b in d.routingTable.buckets:
+      debug "bucket", depth = b.getDepth,
+        len = b.nodes.len, standby = b.replacementLen
+      for n in b.nodes:
+        debug "node", n, rttMin = n.stats.rttMin.int, rttAvg = n.stats.rttAvg.int,
+          reliability = n.seen.round(3)
+        # bandwidth estimates are based on limited information, so not logging them yet to avoid confusion
+        # trace "node", n, bwMaxMbps = (n.stats.bwMax / 1e6).round(3), bwAvgMbps = (n.stats.bwAvg / 1e6).round(3)
func init*(
    T: type DiscoveryConfig,
    tableIpLimit: uint,
@@ -1034,7 +1076,7 @@ func init*(

proc newProtocol*(
    privKey: PrivateKey,
-   enrIp: Option[ValidIpAddress],
+   enrIp: Option[IpAddress],
    enrTcpPort, enrUdpPort: Option[Port],
    localEnrFields: openArray[(string, seq[byte])] = [],
    bootstrapRecords: openArray[SignedPeerRecord] = [],
@@ -1156,6 +1198,7 @@ proc start*(d: Protocol) {.async.} =
  d.refreshLoop = refreshLoop(d)
  d.revalidateLoop = revalidateLoop(d)
  d.ipMajorityLoop = ipMajorityLoop(d)
+  d.debugPrintLoop = debugPrintLoop(d)

  await d.providers.start()

View File

@@ -1,16 +1,17 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
+{.push raises: [].}

import std/sequtils
import pkg/chronicles
import pkg/libp2p
+import pkg/questionable
import ../node
import ../lru
@@ -35,22 +36,21 @@ type
func add*(
  self: var ProvidersCache,
  id: NodeId,
-  provider: SignedPeerRecord) =
+  record: SignedPeerRecord) =
+  ## Add providers for an id
+  ## to the cache

  if self.disable:
    return

-  var providers =
-    if id notin self.cache:
-      Providers.init(self.maxProviders.int)
-    else:
-      self.cache.get(id).get()
+  without var providers =? self.cache.get(id):
+    providers = Providers.init(self.maxProviders.int)

  let
-   peerId = provider.data.peerId
+   peerId = record.data.peerId

-  trace "Adding provider to cache", id, peerId
-  providers.put(peerId, provider)
+  trace "Adding provider record to cache", id, peerId
+  providers.put(peerId, record)
  self.cache.put(id, providers)

proc get*(
@@ -58,14 +58,13 @@ proc get*(
  id: NodeId,
  start = 0,
  stop = MaxProvidersPerEntry.int): seq[SignedPeerRecord] =
+  ## Get providers for an id
+  ## from the cache

  if self.disable:
    return

-  if id in self.cache:
-    let
-      recs = self.cache.get(id).get
+  if recs =? self.cache.get(id):
    let
      providers = toSeq(recs)[start..<min(recs.len, stop)]

@@ -74,23 +73,40 @@ proc get*(
func remove*(
  self: var ProvidersCache,
-  id: NodeId,
  peerId: PeerId) =
+  ## Remove a provider record from an id
+  ## from the cache
+  ##

  if self.disable:
    return

-  if id notin self.cache:
-    return
-
-  var
-    providers = self.cache.get(id).get()
-
-  trace "Removing provider from cache", id
-  providers.del(peerId)
-  self.cache.put(id, providers)
+  for id in self.cache.keys:
+    if var providers =? self.cache.get(id):
+      trace "Removing provider from cache", id, peerId
+      providers.del(peerId)
+      self.cache.put(id, providers)
+
+func remove*(
+  self: var ProvidersCache,
+  id: NodeId,
+  peerId: PeerId) =
+  ## Remove a provider record from an id
+  ## from the cache
+  ##
+
+  if self.disable:
+    return
+
+  if var providers =? self.cache.get(id):
+    trace "Removing record from cache", id
+    providers.del(peerId)
+    self.cache.put(id, providers)

func drop*(self: var ProvidersCache, id: NodeId) =
+  ## Drop all the providers for an entry
+  ##
  if self.disable:
    return
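
The cache rewrite above leans on questionable's `=?` unpacking: `if var x =? opt:` binds a mutable copy when the Option holds a value, and `without x =? opt:` takes the empty branch. A minimal standalone sketch under those assumptions:

import std/options
import pkg/questionable

proc findIdx(xs: seq[int], x: int): ?int =
  for i, v in xs:
    if v == x: return some(i)
  none(int)

proc demo(xs: seq[int], x: int): int =
  without idx =? xs.findIdx(x):
    return -1 # fallback when the Option is empty
  idx         # on the happy path, idx is bound here

doAssert demo(@[10, 20, 30], 20) == 1
doAssert demo(@[10, 20, 30], 99) == -1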

View File

@@ -1,11 +1,11 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
+{.push raises: [].}

import std/sequtils
import std/strutils

View File

@@ -1,15 +1,17 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
+{.push raises: [].}

import std/options
import std/sequtils
+from std/times import now, utc, toTime, toUnix
+import pkg/stew/endians2
import pkg/chronos
import pkg/libp2p
import pkg/datastore
@@ -21,16 +23,13 @@ import ./common
const
  ExpiredCleanupBatch* = 1000
-  CleanupInterval* = 5.minutes
+  CleanupInterval* = 24.hours

proc cleanupExpired*(
    store: Datastore,
    batchSize = ExpiredCleanupBatch) {.async.} =
  trace "Cleaning up expired records"

-  let
-    now = Moment.now()
-
  let
    q = Query.init(CidKey, limit = batchSize)
@@ -47,11 +46,13 @@ proc cleanupExpired*(
  var
    keys = newSeq[Key]()

+  let
+    now = times.now().utc().toTime().toUnix()
+
  for item in iter:
-   if pair =? (await item) and pair.key.isSome:
+   if (maybeKey, data) =? (await item) and key =? maybeKey:
      let
-       (key, data) = (pair.key.get(), pair.data)
-       expired = Moment.init(uint64.fromBytesBE(data).int64, Microsecond)
+       expired = endians2.fromBytesBE(uint64, data).int64

      if now >= expired:
        trace "Found expired record", key
@@ -74,7 +75,7 @@ proc cleanupOrphaned*(
  trace "Cleaning up orphaned records"

  let
-   providersQuery = Query.init(ProvidersKey, limit = batchSize)
+   providersQuery = Query.init(ProvidersKey, limit = batchSize, value = false)

  block:
    without iter =? (await store.query(providersQuery)), err:
@@ -83,7 +84,7 @@ proc cleanupOrphaned*(
    defer:
      if not isNil(iter):
-       trace "Cleaning up query iterator"
+       trace "Cleaning up orphaned query iterator"
        discard (await iter.dispose())

    var count = 0
@@ -92,10 +93,7 @@ proc cleanupOrphaned*(
        trace "Batch cleaned up", size = batchSize
      count.inc

-     if pair =? (await item) and pair.key.isSome:
-       let
-         key = pair.key.get()
-
+     if (maybeKey, _) =? (await item) and key =? maybeKey:
        without peerId =? key.fromProvKey(), err:
          trace "Error extracting parts from cid key", key
          continue
@@ -104,15 +102,17 @@ proc cleanupOrphaned*(
          trace "Error building cid key", err = err.msg
          continue

-       without cidIter =? (await store.query(Query.init(cidKey, limit = 1))), err:
-         trace "Error querying key", cidKey
+       without cidIter =? (await store.query(Query.init(cidKey, limit = 1, value = false))), err:
+         trace "Error querying key", cidKey, err = err.msg
          continue

        let
-         res = (await allFinished(toSeq(cidIter)))
-           .filterIt( it.completed )
-           .mapIt( it.read.get )
-           .filterIt( it.key.isSome ).len
+         res = block:
+           var count = 0
+           for item in cidIter:
+             if (key, _) =? (await item) and key.isSome:
+               count.inc
+           count

        if not isNil(cidIter):
          trace "Disposing cid iter"

View File

@@ -1,4 +1,4 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -7,17 +7,18 @@
import std/sequtils
import std/strutils
+from std/times import now, utc, toTime, toUnix
+import pkg/stew/endians2
import pkg/datastore
import pkg/chronos
import pkg/libp2p
import pkg/chronicles
-import pkg/stew/results as rs
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results

-{.push raises: [Defect].}
+{.push raises: [].}

import ./maintenance
import ./cache
@@ -56,30 +57,30 @@ proc getProvByKey*(self: ProvidersManager, key: Key): Future[?!SignedPeerRecord]
proc add*(
  self: ProvidersManager,
-  cid: NodeId,
+  id: NodeId,
  provider: SignedPeerRecord,
  ttl = ZeroDuration): Future[?!void] {.async.} =

  let
    peerId = provider.data.peerId

-  trace "Adding provider to persistent store", cid, peerId
+  trace "Adding provider to persistent store", id, peerId

  without provKey =? makeProviderKey(peerId), err:
    trace "Error creating key from provider record", err = err.msg
    return failure err.msg

-  without cidKey =? makeCidKey(cid, peerId), err:
+  without cidKey =? makeCidKey(id, peerId), err:
    trace "Error creating key from content id", err = err.msg
    return failure err.msg

  let
+   now = times.now().utc().toTime().toUnix()
    expires =
      if ttl > ZeroDuration:
-       ttl
+       ttl.seconds + now
      else:
-       Moment.fromNow(self.ttl) - ZeroMoment
+       self.ttl.seconds + now

-   ttl = expires.microseconds.uint64.toBytesBE
+   ttl = endians2.toBytesBE(expires.uint64)

    bytes: seq[byte] =
      if existing =? (await self.getProvByKey(provKey)) and
@@ -93,17 +94,17 @@ proc add*(
        bytes

  if bytes.len > 0:
-   trace "Adding or updating provider record", cid, peerId
+   trace "Adding or updating provider record", id, peerId
    if err =? (await self.store.put(provKey, bytes)).errorOption:
      trace "Unable to store provider with key", key = provKey, err = err.msg

-   trace "Adding or updating cid", cid, key = cidKey, ttl = expires.minutes
+   trace "Adding or updating id", id, key = cidKey, ttl = expires.seconds
    if err =? (await self.store.put(cidKey, @ttl)).errorOption:
      trace "Unable to store provider with key", key = cidKey, err = err.msg
      return

-  self.cache.add(cid, provider)
-  trace "Provider for cid added", cidKey, provKey
+  self.cache.add(id, provider)
+  trace "Provider for id added", cidKey, provKey
  return success()
proc get*(
@@ -136,12 +137,10 @@ proc get*(
      trace "Cleaning up query iterator"
      discard (await cidIter.dispose())

+  var keys: seq[Key]
  for item in cidIter:
    # TODO: =? doesn't support tuples
-   if pair =? (await item) and pair.key.isSome:
-     let
-       (key, val) = (pair.key.get, pair.data)
+   if (maybeKey, val) =? (await item) and key =? maybeKey:

      without pairs =? key.fromCidKey() and
              provKey =? makeProviderKey(pairs.peerId), err:
        trace "Error creating key from provider record", err = err.msg
@@ -150,17 +149,24 @@ proc get*(
      trace "Querying provider key", key = provKey
      without data =? (await self.store.get(provKey)):
        trace "Error getting provider", key = provKey
+       keys.add(key)
        continue

      without provider =? SignedPeerRecord.decode(data).mapErr(mapFailure), err:
        trace "Unable to decode provider from store", err = err.msg
+       keys.add(key)
        continue

      trace "Retrieved provider with key", key = provKey
      providers.add(provider)
      self.cache.add(id, provider)

-  trace "Retrieved providers from persistent store", cid = id, len = providers.len
+  trace "Deleting keys without provider from store", len = keys.len
+  if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
+    trace "Error deleting records from persistent store", err = err.msg
+    return failure err
+
+  trace "Retrieved providers from persistent store", id = id, len = providers.len
  return success providers
proc contains*(
@@ -178,8 +184,8 @@ proc contains*(self: ProvidersManager, peerId: PeerId): Future[bool] {.async.} =
  return (await self.store.has(provKey)) |? false

-proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
-  without cidKey =? (CidKey / $cid), err:
+proc contains*(self: ProvidersManager, id: NodeId): Future[bool] {.async.} =
+  without cidKey =? (CidKey / $id), err:
    return false

  let
@@ -196,15 +202,15 @@ proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
    discard (await iter.dispose())

  for item in iter:
-   if pair =? (await item) and pair.key.isSome:
+   if (key, _) =? (await item) and key.isSome:
      return true

  return false

-proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
-  self.cache.drop(cid)
-  without cidKey =? (CidKey / $cid), err:
+proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =
+  self.cache.drop(id)
+  without cidKey =? (CidKey / $id), err:
    return failure(err.msg)

  let
@@ -224,16 +230,14 @@ proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
    keys: seq[Key]

  for item in iter:
-   if pair =? (await item) and pair.key.isSome:
-     let
-       key = pair.key.get()
-
+   if (maybeKey, _) =? (await item) and key =? maybeKey:
      keys.add(key)

      without pairs =? key.fromCidKey, err:
        trace "Unable to parse peer id from key", key
        return failure err

-     self.cache.remove(cid, pairs.peerId)
+     self.cache.remove(id, pairs.peerId)
      trace "Deleted record from store", key

  if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
return success() return success()
proc remove*(self: ProvidersManager, peerId: PeerId): Future[?!void] {.async.} = proc remove*(
without cidKey =? (CidKey / "*" / $peerId), err: self: ProvidersManager,
return failure err peerId: PeerId,
entries = false): Future[?!void] {.async.} =
let if entries:
q = Query.init(cidKey) without cidKey =? (CidKey / "*" / $peerId), err:
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err return failure err
defer: let
if not isNil(iter): q = Query.init(cidKey)
trace "Cleaning up query iterator"
discard (await iter.dispose())
var block:
keys: seq[Key] without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err
for item in iter: defer:
if pair =? (await item) and pair.key.isSome: if not isNil(iter):
let trace "Cleaning up query iterator"
key = pair.key.get() discard (await iter.dispose())
keys.add(key) var
keys: seq[Key]
let for item in iter:
parts = key.id.split(datastore.Separator) if (maybeKey, _) =? (await item) and key =? maybeKey:
keys.add(key)
self.cache.remove(NodeId.fromHex(parts[2]), peerId) let
parts = key.id.split(datastore.Separator)
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption: if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg trace "Error deleting record from persistent store", err = err.msg
return failure err return failure err
trace "Deleted records from store" trace "Deleted records from store"
without provKey =? makeProviderKey(peerId), err: without provKey =? peerId.makeProviderKey, err:
return failure err return failure err
trace "Removing provider from cache", peerId
self.cache.remove(peerId)
trace "Removing provider record", key = provKey trace "Removing provider record", key = provKey
return (await self.store.delete(provKey)) return (await self.store.delete(provKey))
proc remove*( proc remove*(
self: ProvidersManager, self: ProvidersManager,
cid: NodeId, id: NodeId,
peerId: PeerId): Future[?!void] {.async.} = peerId: PeerId): Future[?!void] {.async.} =
self.cache.remove(cid, peerId) self.cache.remove(id, peerId)
without cidKey =? makeCidKey(cid, peerId), err: without cidKey =? makeCidKey(id, peerId), err:
trace "Error creating key from content id", err = err.msg trace "Error creating key from content id", err = err.msg
return failure err.msg return failure err.msg

View File

@@ -1,21 +1,26 @@
-# codex-dht - Codex DHT
+# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

-{.push raises: [Defect].}
+{.push raises: [].}

import
-  std/[algorithm, times, sequtils, bitops, sets, options, tables],
-  stint, chronicles, metrics, bearssl/rand, chronos, stew/shims/net as stewNet,
+  std/[algorithm, net, times, sequtils, bitops, sets, options, tables],
+  stint, chronicles, metrics, bearssl/rand, chronos,
  "."/[node, random2, spr]

export options

-declarePublicGauge routing_table_nodes,
+declarePublicGauge dht_routing_table_nodes,
  "Discovery routing table nodes", labels = ["state"]
+declarePublicGauge dht_routing_table_buckets,
+  "Discovery routing table: number of buckets"
+
+logScope:
+  topics = "discv5 routingtable"

type
  DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
@@ -29,7 +34,7 @@ type
  IpLimits* = object
    limit*: uint
-   ips: Table[ValidIpAddress, uint]
+   ips: Table[IpAddress, uint]

  RoutingTable* = object
@@ -96,7 +101,7 @@ type
    ReplacementExisting
    NoAddress

-func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
+func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
  let val = ipLimits.ips.getOrDefault(ip, 0)
  if val < ipLimits.limit:
    ipLimits.ips[ip] = val + 1
@@ -104,7 +109,7 @@ func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
  else:
    false

-func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) =
+func dec*(ipLimits: var IpLimits, ip: IpAddress) =
  let val = ipLimits.ips.getOrDefault(ip, 0)
  if val == 1:
    ipLimits.ips.del(ip)
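
A hypothetical use of the per-IP counter above (assuming the IpLimits type is in scope): inc grants a table slot until limit entries share the same IP, and dec releases one.

import std/net

var limits = IpLimits(limit: 2)
let ip = parseIpAddress("203.0.113.7")

doAssert limits.inc(ip)     # first node behind this IP
doAssert limits.inc(ip)     # second one still fits
doAssert not limits.inc(ip) # third is refused: per-IP limit reached
limits.dec(ip)              # one node left; a slot frees up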
@ -177,6 +182,8 @@ proc midpoint(k: KBucket): NodeId =
proc len(k: KBucket): int = k.nodes.len proc len(k: KBucket): int = k.nodes.len
proc replacementLen*(k: KBucket): int = k.replacementCache.len
proc tail(k: KBucket): Node = k.nodes[high(k.nodes)] proc tail(k: KBucket): Node = k.nodes[high(k.nodes)]
proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool = proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool =
@ -205,14 +212,14 @@ proc ipLimitDec(r: var RoutingTable, b: KBucket, n: Node) =
proc add(k: KBucket, n: Node) = proc add(k: KBucket, n: Node) =
k.nodes.add(n) k.nodes.add(n)
routing_table_nodes.inc() dht_routing_table_nodes.inc()
proc remove(k: KBucket, n: Node): bool = proc remove(k: KBucket, n: Node): bool =
let i = k.nodes.find(n) let i = k.nodes.find(n)
if i != -1: if i != -1:
routing_table_nodes.dec() dht_routing_table_nodes.dec()
if k.nodes[i].seen: if alreadySeen(k.nodes[i]):
routing_table_nodes.dec(labelValues = ["seen"]) dht_routing_table_nodes.dec(labelValues = ["seen"])
k.nodes.delete(i) k.nodes.delete(i)
trace "removed node:", node = n trace "removed node:", node = n
true true
@ -278,11 +285,15 @@ proc computeSharedPrefixBits(nodes: openArray[NodeId]): int =
# Reaching this would mean that all node ids are equal. # Reaching this would mean that all node ids are equal.
doAssert(false, "Unable to calculate number of shared prefix bits") doAssert(false, "Unable to calculate number of shared prefix bits")
proc getDepth*(b: KBucket) : int =
computeSharedPrefixBits(@[b.istart, b.iend])
proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop, proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop,
ipLimits = DefaultTableIpLimits, rng: ref HmacDrbgContext, ipLimits = DefaultTableIpLimits, rng: ref HmacDrbgContext,
distanceCalculator = XorDistanceCalculator): T = distanceCalculator = XorDistanceCalculator): T =
## Initialize the routing table for provided `Node` and bitsPerHop value. ## Initialize the routing table for provided `Node` and bitsPerHop value.
## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper. ## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper.
dht_routing_table_buckets.inc()
RoutingTable( RoutingTable(
localNode: localNode, localNode: localNode,
buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)], buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)],
@ -296,6 +307,7 @@ proc splitBucket(r: var RoutingTable, index: int) =
let (a, b) = bucket.split() let (a, b) = bucket.split()
r.buckets[index] = a r.buckets[index] = a
r.buckets.insert(b, index + 1) r.buckets.insert(b, index + 1)
dht_routing_table_buckets.inc()
proc bucketForNode(r: RoutingTable, id: NodeId): KBucket = proc bucketForNode(r: RoutingTable, id: NodeId): KBucket =
result = binaryGetBucketForNode(r.buckets, id) result = binaryGetBucketForNode(r.buckets, id)
@ -317,15 +329,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
# gets moved to the tail. # gets moved to the tail.
if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip: if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
if not ipLimitInc(r, k, n): if not ipLimitInc(r, k, n):
trace "replace: ip limit reached"
return IpLimitReached return IpLimitReached
ipLimitDec(r, k, k.replacementCache[nodeIdx]) ipLimitDec(r, k, k.replacementCache[nodeIdx])
k.replacementCache.delete(nodeIdx) k.replacementCache.delete(nodeIdx)
k.replacementCache.add(n) k.replacementCache.add(n)
trace "replace: already existed"
return ReplacementExisting return ReplacementExisting
elif not ipLimitInc(r, k, n): elif not ipLimitInc(r, k, n):
trace "replace: ip limit reached (2)"
return IpLimitReached return IpLimitReached
else: else:
doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE) doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@ -336,7 +345,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
k.replacementCache.delete(0) k.replacementCache.delete(0)
k.replacementCache.add(n) k.replacementCache.add(n)
trace "replace: added" debug "Node added to replacement cache", n
return ReplacementAdded return ReplacementAdded
proc addNode*(r: var RoutingTable, n: Node): NodeStatus = proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
@ -403,42 +412,50 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
return IpLimitReached return IpLimitReached
bucket.add(n) bucket.add(n)
else: debug "Node added to routing table", n
# Bucket must be full, but let's see if the bucket should be split. return Added
# Calculate the prefix shared by all nodes in the bucket's range, not the # Bucket must be full, but let's see if the bucket should be split.
# ones actually in the bucket. # Calculate the prefix shared by all nodes in the bucket's range, not the
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend]) # ones actually in the bucket.
# Split if the bucket has the local node in its range or if the depth is not let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# congruent to 0 mod `bitsPerHop` # Split if the bucket has the local node in its range or if the depth is not
if bucket.inRange(r.localNode) or # congruent to 0 mod `bitsPerHop`
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE): if bucket.inRange(r.localNode) or
r.splitBucket(r.buckets.find(bucket)) (depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
return r.addNode(n) # retry adding r.splitBucket(r.buckets.find(bucket))
else: return r.addNode(n) # retry adding
# When the bucket doesn't get split, the node is added to the replacement cache
return r.addReplacement(bucket, n) # When the bucket doesn't get split, the node is added to the replacement cache
return r.addReplacement(bucket, n)
proc removeNode*(r: var RoutingTable, n: Node) = proc removeNode*(r: var RoutingTable, n: Node) =
## Remove the node `n` from the routing table. ## Remove the node `n` from the routing table.
## No replacement is added, even if there is one in the replacement cache.
let b = r.bucketForNode(n.id) let b = r.bucketForNode(n.id)
if b.remove(n): if b.remove(n):
ipLimitDec(r, b, n) ipLimitDec(r, b, n)
proc replaceNode*(r: var RoutingTable, n: Node) = proc replaceNode*(r: var RoutingTable, n: Node, forceRemoveBelow = 1.0) =
## Replace node `n` with last entry in the replacement cache. If there are ## Replace node `n` with last entry in the replacement cache. If there are
## no entries in the replacement cache, node `n` will simply be removed. ## no entries in the replacement cache, node `n` will either be removed
# TODO: Kademlia paper recommends here to not remove nodes if there are no ## or kept based on `forceRemoveBelow`. Default: remove.
# replacements. However, that would require a bit more complexity in the ## Note: the Kademlia paper recommends not removing nodes here if there are no
# revalidation as you don't want to try pinging that node all the time. ## replacements. This might mean pinging nodes that are not reachable, but
## also avoids being too aggressive in the face of UDP losses or temporary network
## failures.
let b = r.bucketForNode(n.id) let b = r.bucketForNode(n.id)
if b.remove(n): if (b.replacementCache.len > 0 or n.seen <= forceRemoveBelow):
ipLimitDec(r, b, n) if b.remove(n):
debug "Node removed from routing table", n
ipLimitDec(r, b, n)
if b.replacementCache.len > 0: if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits. # Nodes in the replacement cache are already included in the ip limits.
b.add(b.replacementCache[high(b.replacementCache)]) let rn = b.replacementCache[high(b.replacementCache)]
b.replacementCache.delete(high(b.replacementCache)) b.add(rn)
b.replacementCache.delete(high(b.replacementCache))
debug "Node added to routing table from replacement cache", node=rn
proc getNode*(r: RoutingTable, id: NodeId): Option[Node] = proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
## Get the `Node` with `id` as `NodeId` from the routing table. ## Get the `Node` with `id` as `NodeId` from the routing table.
@ -459,16 +476,16 @@ proc nodesByDistanceTo(r: RoutingTable, k: KBucket, id: NodeId): seq[Node] =
sortedByIt(k.nodes, r.distance(it.id, id)) sortedByIt(k.nodes, r.distance(it.id, id))
proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE, proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
seenOnly = false): seq[Node] = seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours of the given node id. ## Return up to k neighbours of the given node id.
## When seenOnly is set to true, only nodes that have been contacted ## When seenThreshold is set, only nodes that have been contacted
## previously successfully will be selected. ## successfully before and seen recently enough will be selected.
result = newSeqOfCap[Node](k * 2) result = newSeqOfCap[Node](k * 2)
block addNodes: block addNodes:
for bucket in r.bucketsByDistanceTo(id): for bucket in r.bucketsByDistanceTo(id):
for n in r.nodesByDistanceTo(bucket, id): for n in r.nodesByDistanceTo(bucket, id):
# Only provide actively seen nodes when `seenOnly` set. # Avoid nodes with 'seen' value below threshold
if not seenOnly or n.seen: if n.seen >= seenThreshold:
result.add(n) result.add(n)
if result.len == k * 2: if result.len == k * 2:
break addNodes break addNodes
@ -480,22 +497,22 @@ proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
result.setLen(k) result.setLen(k)
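The `seenThreshold` parameter generalises the old boolean `seenOnly`: nodes carry a fractional `seen` score and only sufficiently reliable ones are returned. A self-contained sketch of the filter-sort-truncate shape of `neighbours`, with int XOR standing in for the 256-bit distance (all names here are illustrative):

import std/[algorithm, sequtils]

type DemoNode = object
  id: int
  seen: float  # reliability score in [0.0, 1.0]

proc neighboursDemo(nodes: seq[DemoNode], target, k: int,
                    seenThreshold = 0.0): seq[DemoNode] =
  # Drop nodes below the reliability threshold, then return up to k
  # of the remainder, closest by XOR distance first.
  let eligible = nodes.filterIt(it.seen >= seenThreshold)
                      .sortedByIt(it.id xor target)
  eligible[0 ..< min(k, eligible.len)]

when isMainModule:
  let nodes = @[DemoNode(id: 1, seen: 0.9), DemoNode(id: 2, seen: 0.1),
                DemoNode(id: 3, seen: 0.8)]
  doAssert neighboursDemo(nodes, 0, 2, seenThreshold = 0.5).mapIt(it.id) == @[1, 3]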
proc neighboursAtDistance*(r: RoutingTable, distance: uint16, proc neighboursAtDistance*(r: RoutingTable, distance: uint16,
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] = k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distance. ## Return up to k neighbours at given logarithmic distance.
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenOnly) result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones # This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distance. # that are exactly the requested distance.
keepIf(result, proc(n: Node): bool = r.logDistance(n.id, r.localNode.id) == distance) keepIf(result, proc(n: Node): bool = r.logDistance(n.id, r.localNode.id) == distance)
proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16], proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16],
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] = k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distances. ## Return up to k neighbours at given logarithmic distances.
# TODO: This will currently return nodes with neighbouring distances on the # TODO: This will currently return nodes with neighbouring distances on the
# first one prioritized. It might end up not including all the node distances # first one prioritized. It might end up not including all the node distances
# requested. Need to rework the logic here and not use the neighbours call. # requested. Need to rework the logic here and not use the neighbours call.
if distances.len > 0: if distances.len > 0:
result = r.neighbours(r.idAtDistance(r.localNode.id, distances[0]), k, result = r.neighbours(r.idAtDistance(r.localNode.id, distances[0]), k,
seenOnly) seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones # This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distances. # that are exactly the requested distances.
keepIf(result, proc(n: Node): bool = keepIf(result, proc(n: Node): bool =
@ -507,23 +524,30 @@ proc len*(r: RoutingTable): int =
proc moveRight[T](arr: var openArray[T], a, b: int) = proc moveRight[T](arr: var openArray[T], a, b: int) =
## In `arr` move elements in range [a, b] right by 1. ## In `arr` move elements in range [a, b] right by 1.
var t: T var t: T
shallowCopy(t, arr[b + 1]) when declared(shallowCopy):
for i in countdown(b, a): shallowCopy(t, arr[b + 1])
shallowCopy(arr[i + 1], arr[i]) for i in countdown(b, a):
shallowCopy(arr[a], t) shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
else:
t = move arr[b + 1]
for i in countdown(b, a):
arr[i + 1] = move arr[i]
arr[a] = move t
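`shallowCopy` is gone under Nim 2.x ARC/ORC, so the rotation gains a `move`-based branch behind `when declared(shallowCopy)`. A standalone sketch of the same rotation on the `move` path:

proc moveRightDemo[T](arr: var openArray[T], a, b: int) =
  ## Move elements in [a, b] right by one; arr[b + 1] wraps around to position a.
  var t = move arr[b + 1]
  for i in countdown(b, a):
    arr[i + 1] = move arr[i]
  arr[a] = move t

when isMainModule:
  var xs = @[1, 2, 3, 4, 5]
  xs.moveRightDemo(0, 3)           # bring xs[4] to the front of [0..3]
  doAssert xs == @[5, 1, 2, 3, 4]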
proc setJustSeen*(r: RoutingTable, n: Node) = proc setJustSeen*(r: RoutingTable, n: Node, seen = true) =
## Move `n` to the head (most recently seen) of its bucket. ## If seen, move `n` to the head (most recently seen) of its bucket.
## If `n` is not in the routing table, do nothing. ## If `n` is not in the routing table, do nothing.
let b = r.bucketForNode(n.id) let b = r.bucketForNode(n.id)
let idx = b.nodes.find(n) if seen:
if idx >= 0: let idx = b.nodes.find(n)
if idx != 0: if idx >= 0:
b.nodes.moveRight(0, idx - 1) if idx != 0:
b.nodes.moveRight(0, idx - 1)
if not n.seen: if not alreadySeen(n): # first time seeing the node
b.nodes[0].seen = true dht_routing_table_nodes.inc(labelValues = ["seen"])
routing_table_nodes.inc(labelValues = ["seen"]) n.registerSeen(seen)
proc nodeToRevalidate*(r: RoutingTable): Node = proc nodeToRevalidate*(r: RoutingTable): Node =
## Return a node to revalidate. The least recently seen node from a random ## Return a node to revalidate. The least recently seen node from a random
@ -537,7 +561,7 @@ proc nodeToRevalidate*(r: RoutingTable): Node =
return b.nodes[^1] return b.nodes[^1]
proc randomNodes*(r: RoutingTable, maxAmount: int, proc randomNodes*(r: RoutingTable, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect.} = nil): seq[Node] = pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].} = nil): seq[Node] =
## Get a `maxAmount` of random nodes from the routing table with the `pred` ## Get a `maxAmount` of random nodes from the routing table with the `pred`
## predicate function applied as filter on the nodes selected. ## predicate function applied as filter on the nodes selected.
var maxAmount = maxAmount var maxAmount = maxAmount
@ -560,7 +584,8 @@ proc randomNodes*(r: RoutingTable, maxAmount: int,
# while it will take less total time compared to e.g. an (async) # while it will take less total time compared to e.g. an (async)
# randomLookup, the time might be wasted as all nodes are possibly seen # randomLookup, the time might be wasted as all nodes are possibly seen
# already. # already.
while len(seen) < maxAmount: # We check against the number of nodes to avoid an infinite loop in case of a filter.
while len(result) < maxAmount and len(seen) < sz:
let bucket = r.rng[].sample(r.buckets) let bucket = r.rng[].sample(r.buckets)
if bucket.nodes.len != 0: if bucket.nodes.len != 0:
let node = r.rng[].sample(bucket.nodes) let node = r.rng[].sample(bucket.nodes)
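The changed loop condition is what removes the potential infinite loop: when `pred` rejects every candidate, `seen` still grows until it covers all `sz` nodes and the loop exits. A schematic, self-contained version of the bounded sampling loop, simplified to ints (`sampleNodes` and its types are illustrative):

import std/[random, sets]

proc sampleNodes(all: seq[int], maxAmount: int,
                 pred: proc(x: int): bool = nil): seq[int] =
  var seen: HashSet[int]
  let sz = all.len
  # Bounded by the node count: terminates even if pred rejects everything.
  while result.len < maxAmount and seen.len < sz:
    let node = all[rand(sz - 1)]
    if node notin seen:
      seen.incl(node)
      if pred.isNil or pred(node):
        result.add(node)

when isMainModule:
  randomize()
  # A predicate that rejects everything no longer hangs the call:
  doAssert sampleNodes(@[1, 2, 3], 2, proc(x: int): bool = false).len == 0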


@ -1,4 +1,4 @@
# codex-dht - Codex DHT # logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH # Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of # Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -9,11 +9,18 @@
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache ## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
## ##
{.push raises: [Defect].} ## A session stores encryption and decryption keys for P2P encryption.
## Since a key exchange can be started from either side, and it might not get finalised
## over UDP transport, we can't be sure which encryption key the other side will use:
## - the one derived in the key-exchange started by us,
## - the one derived in the key-exchange started by the other node.
## To alleviate this issue, we store two decryption keys in each session.
{.push raises: [].}
import import
std/options, std/[net, options],
stint, stew/endians2, stew/shims/net, stint, stew/endians2,
node, lru node, lru
export lru export lru
@ -27,12 +34,12 @@ const
type type
AesKey* = array[aesKeySize, byte] AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte] SessionKey* = array[keySize, byte]
SessionValue* = array[sizeof(AesKey) + sizeof(AesKey), byte] SessionValue* = array[3 * sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue] Sessions* = LRUCache[SessionKey, SessionValue]
func makeKey(id: NodeId, address: Address): SessionKey = func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0 var pos = 0
result[pos ..< pos+sizeof(id)] = toBytes(id) result[pos ..< pos+sizeof(id)] = toBytesBE(id)
pos.inc(sizeof(id)) pos.inc(sizeof(id))
case address.ip.family case address.ip.family
of IpAddressFamily.IpV4: of IpAddressFamily.IpV4:
@ -40,20 +47,39 @@ func makeKey(id: NodeId, address: Address): SessionKey =
of IpAddressFamily.IpV6: of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6 result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6)) pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytes(address.port.uint16) result[pos ..< pos+sizeof(address.port)] = toBytesBE(address.port.uint16)
func swapr*(s: var Sessions, id: NodeId, address: Address) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(AesKey))
copyMem(addr value[16], unsafeAddr val[0], sizeof(AesKey))
copyMem(addr value[32], unsafeAddr val[32], sizeof(AesKey))
s.put(key, value)
func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) = func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[sizeof(r) + sizeof(w), byte] var value: array[3 * sizeof(AesKey), byte]
value[0 .. 15] = r let
value[16 .. ^1] = w key = makeKey(id, address)
s.put(makeKey(id, address), value) entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(r))
value[16 .. 31] = r
value[32 .. ^1] = w
s.put(key, value)
func load*(s: var Sessions, id: NodeId, address: Address, r, w: var AesKey): bool = func load*(s: var Sessions, id: NodeId, address: Address, r1, r2, w: var AesKey): bool =
let res = s.get(makeKey(id, address)) let res = s.get(makeKey(id, address))
if res.isSome(): if res.isSome():
let val = res.get() let val = res.get()
copyMem(addr r[0], unsafeAddr val[0], sizeof(r)) copyMem(addr r1[0], unsafeAddr val[0], sizeof(r1))
copyMem(addr w[0], unsafeAddr val[sizeof(r)], sizeof(w)) copyMem(addr r2[0], unsafeAddr val[sizeof(r1)], sizeof(r2))
copyMem(addr w[0], unsafeAddr val[sizeof(r1) + sizeof(r2)], sizeof(w))
return true return true
else: else:
return false return false
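To make the three-slot layout concrete: each SessionValue now holds two candidate decryption keys (one per possible key-exchange direction) followed by the write key. A minimal sketch using a plain Table in place of the LRU cache; the string key and names are illustrative, not the module's API:

import std/tables

const aesKeySize = 16
type
  AesKey = array[aesKeySize, byte]
  SessionValue = array[3 * aesKeySize, byte]  # layout: r1 | r2 | w

var sessions: Table[string, SessionValue]

proc store(key: string, r, w: AesKey) =
  # On overwrite, the previous read key slides into slot r1, so packets
  # encrypted under the older key can still be decrypted.
  var value: SessionValue
  if key in sessions:
    value[0 .. 15] = sessions[key][16 .. 31]
  value[16 .. 31] = r
  value[32 .. ^1] = w
  sessions[key] = value

proc load(key: string, r1, r2, w: var AesKey): bool =
  if key in sessions:
    let v = sessions[key]
    copyMem(addr r1[0], unsafeAddr v[0], aesKeySize)
    copyMem(addr r2[0], unsafeAddr v[16], aesKeySize)
    copyMem(addr w[0], unsafeAddr v[32], aesKeySize)
    true
  else:
    false

when isMainModule:
  var k: AesKey
  for i in 0 ..< aesKeySize: k[i] = byte(i + 1)
  store("nodeA@127.0.0.1:9000", k, k)
  var r1, r2, w: AesKey
  doAssert load("nodeA@127.0.0.1:9000", r1, r2, w)
  doAssert r2 == k and w == k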


@ -6,10 +6,10 @@
# #
import import
chronicles, chronicles,
std/[options, strutils, sugar], results,
pkg/stew/[results, byteutils, arrayops], std/[net, options, strutils, sugar],
pkg/stew/[byteutils, arrayops],
stew/endians2, stew/endians2,
stew/shims/net,
stew/base64, stew/base64,
libp2p/crypto/crypto, libp2p/crypto/crypto,
libp2p/crypto/secp, libp2p/crypto/secp,
@ -58,7 +58,7 @@ proc incSeqNo*(
proc update*( proc update*(
r: var SignedPeerRecord, r: var SignedPeerRecord,
pk: crypto.PrivateKey, pk: crypto.PrivateKey,
ip: Option[ValidIpAddress], ip: Option[IpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()): tcpPort, udpPort: Option[Port] = none[Port]()):
RecordResult[void] = RecordResult[void] =
## Update a `SignedPeerRecord` with given ip address, tcp port, udp port and optional ## Update a `SignedPeerRecord` with given ip address, tcp port, udp port and optional
@ -97,9 +97,8 @@ proc update*(
if udpPort.isNone and tcpPort.isNone: if udpPort.isNone and tcpPort.isNone:
return err "No existing address in SignedPeerRecord with no port provided" return err "No existing address in SignedPeerRecord with no port provided"
let ipAddr = try: ValidIpAddress.init(ip.get) let ipAddr = ip.get
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
if tcpPort.isSome: if tcpPort.isSome:
transProto = IpTransportProtocol.tcpProtocol transProto = IpTransportProtocol.tcpProtocol
transProtoPort = tcpPort.get transProtoPort = tcpPort.get
@ -123,9 +122,13 @@ proc update*(
.mapErr((e: string) => e.cstring) .mapErr((e: string) => e.cstring)
existingIp = existingIp =
if existingNetProtoFam == MultiCodec.codec("ip6"): if existingNetProtoFam == MultiCodec.codec("ip6"):
ipv6 array[16, byte].initCopyFrom(existingNetProtoAddr) IpAddress(
family: IPv6, address_v6: array[16, byte].initCopyFrom(existingNetProtoAddr)
)
else: else:
ipv4 array[4, byte].initCopyFrom(existingNetProtoAddr) IpAddress(
family: IPv4, address_v4: array[4, byte].initCopyFrom(existingNetProtoAddr)
)
ipAddr = ip.get(existingIp) ipAddr = ip.get(existingIp)
@ -223,7 +226,7 @@ proc init*(
T: type SignedPeerRecord, T: type SignedPeerRecord,
seqNum: uint64, seqNum: uint64,
pk: PrivateKey, pk: PrivateKey,
ip: Option[ValidIpAddress], ip: Option[IpAddress],
tcpPort, udpPort: Option[Port]): tcpPort, udpPort: Option[Port]):
RecordResult[T] = RecordResult[T] =
## Initialize a `SignedPeerRecord` with given sequence number, private key, optional ## Initialize a `SignedPeerRecord` with given sequence number, private key, optional
@ -238,9 +241,7 @@ proc init*(
tcpPort, udpPort tcpPort, udpPort
var var
ipAddr = try: ValidIpAddress.init("127.0.0.1") ipAddr = static parseIpAddress("127.0.0.1")
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
proto: IpTransportProtocol proto: IpTransportProtocol
protoPort: Port protoPort: Port


@ -6,26 +6,40 @@
# Everything below the handling of ordinary messages # Everything below the handling of ordinary messages
import import
std/[tables, options], std/[net, tables, options, sets],
bearssl/rand, bearssl/rand,
chronos, chronos,
chronicles, chronicles,
metrics,
libp2p/crypto/crypto, libp2p/crypto/crypto,
stew/shims/net,
"."/[node, encoding, sessions] "."/[node, encoding, sessions]
const const
handshakeTimeout* = 2.seconds ## timeout for the reply on the handshakeTimeout* = 500.milliseconds ## timeout for the reply on the
## whoareyou message ## whoareyou message
responseTimeout* = 4.seconds ## timeout for the response of a request-response responseTimeout* = 1.seconds ## timeout for the response of a request-response
## call ## call
logScope:
topics = "discv5 transport"
declarePublicCounter dht_transport_tx_packets,
"Discovery transport packets sent", labels = ["state"]
declarePublicCounter dht_transport_tx_bytes,
"Discovery transport bytes sent", labels = ["state"]
declarePublicCounter dht_transport_rx_packets,
"Discovery transport packets received", labels = ["state"]
declarePublicCounter dht_transport_rx_bytes,
"Discovery transport bytes received", labels = ["state"]
type type
Transport* [Client] = ref object Transport* [Client] = ref object
client: Client client: Client
bindAddress: Address ## UDP binding address bindAddress: Address ## UDP binding address
transp: DatagramTransport transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, PendingRequest] pendingRequests: Table[AESGCMNonce, (PendingRequest, Moment)]
keyexchangeInProgress: HashSet[NodeId]
pendingRequestsByNode: Table[NodeId, seq[seq[byte]]]
codec*: Codec codec*: Codec
rng: ref HmacDrbgContext rng: ref HmacDrbgContext
@ -33,29 +47,36 @@ type
node: Node node: Node
message: seq[byte] message: seq[byte]
proc sendToA(t: Transport, a: Address, data: seq[byte]) = proc sendToA(t: Transport, a: Address, msg: seq[byte]) =
trace "Send packet", myport = t.bindAddress.port, address = a
let ta = initTAddress(a.ip, a.port) let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, data) let f = t.transp.sendTo(ta, msg)
f.callback = proc(data: pointer) {.gcsafe.} = f.addCallback(
if f.failed: proc(data: pointer) =
# Could be `TransportUseClosedError` in case the transport is already if f.failed:
# closed, or could be `TransportOsError` in case of a socket error. # Could be `TransportUseClosedError` in case the transport is already
# In the latter case this would probably mostly occur if the network # closed, or could be `TransportOsError` in case of a socket error.
# interface underneath gets disconnected or similar. # In the latter case this would probably mostly occur if the network
# TODO: Should this kind of error be propagated upwards? Probably, but # interface underneath gets disconnected or similar.
# it should not stop the process as that would reset the discovery # TODO: Should this kind of error be propagated upwards? Probably, but
# progress in case there is even a small window of no connection. # it should not stop the process as that would reset the discovery
# One case that needs this error available upwards is when revalidating # progress in case there is even a small window of no connection.
# nodes. Else the revalidation might end up clearing the routing tabl # One case that needs this error available upwards is when revalidating
# because of ping failures due to own network connection failure. # nodes. Else the revalidation might end up clearing the routing tabl
warn "Discovery send failed", msg = f.readError.msg # because of ping failures due to own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
dht_transport_tx_packets.inc(labelValues = ["failed"])
dht_transport_tx_bytes.inc(msg.len.int64, labelValues = ["failed"])
)
dht_transport_tx_packets.inc()
dht_transport_tx_bytes.inc(msg.len.int64)
proc send(t: Transport, n: Node, data: seq[byte]) = proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome()) doAssert(n.address.isSome())
t.sendToA(n.address.get(), data) t.sendToA(n.address.get(), data)
proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) = proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr, let (data, _, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message) message)
t.sendToA(toAddr, data) t.sendToA(toAddr, data)
@ -65,7 +86,7 @@ proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte
proc registerRequest(t: Transport, n: Node, message: seq[byte], proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) = nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message) let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, request): if not t.pendingRequests.hasKeyOrPut(nonce, (request, Moment.now())):
sleepAsync(responseTimeout).addCallback() do(data: pointer): sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce) t.pendingRequests.del(nonce)
@ -73,11 +94,30 @@ proc registerRequest(t: Transport, n: Node, message: seq[byte],
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) = proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome()) doAssert(toNode.address.isSome())
let address = toNode.address.get() let address = toNode.address.get()
let (data, nonce) = encodeMessagePacket(t.rng[], t.codec, let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message) toNode.id, address, message)
t.registerRequest(toNode, message, nonce) if haskey:
t.send(toNode, data) trace "Send message: has key", myport = t.bindAddress.port, dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
else:
# We don't have an encryption key for this target, so we should initiate a key exchange
if toNode.id notin t.keyexchangeInProgress:
trace "Send message: send random to trigger Whoareyou", myport = t.bindAddress.port, dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.keyexchangeInProgress.incl(toNode.id)
trace "keyexchangeInProgress added", myport = t.bindAddress.port, dstId = toNode
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (timeout)", myport = t.bindAddress.port, dstId = toNode
else:
# Delay sending this message until the Whoareyou is received and the handshake is sent;
# it will have to be re-encoded once the session keys are established.
t.pendingRequestsByNode.mgetOrPut(toNode.id, newSeq[seq[byte]]()).add(message)
trace "Send message: Node with this id already has ongoing keyexchange, delaying packet",
myport = t.bindAddress.port, dstId = toNode, qlen = t.pendingRequestsByNode[toNode.id].len
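The queuing logic above follows a buffer-and-flush pattern: if a key exchange is already in flight for the peer, outgoing messages are parked and sent once the handshake completes (the timeout path simply clears the in-progress flag). A schematic, self-contained sketch of that pattern; all names and types here are illustrative:

import std/[tables, sets]

type NodeId = int  # stand-in for the real 256-bit id

var
  keyexchangeInProgress: HashSet[NodeId]
  pendingByNode: Table[NodeId, seq[seq[byte]]]

proc sendEncrypted(id: NodeId, msg: seq[byte]) =
  echo "sent ", msg.len, " byte(s) to ", id

proc sendOrQueue(id: NodeId, msg: seq[byte], haveKey: bool) =
  if haveKey:
    sendEncrypted(id, msg)
  elif id notin keyexchangeInProgress:
    # First message without a session key: send it (random-encoded in the
    # real protocol) to trigger a Whoareyou, and mark the exchange as running.
    keyexchangeInProgress.incl(id)
    sendEncrypted(id, msg)
  else:
    # Key exchange already running: buffer until keys are established.
    pendingByNode.mgetOrPut(id, @[]).add(msg)

proc onHandshakeDone(id: NodeId) =
  keyexchangeInProgress.excl(id)
  for msg in pendingByNode.getOrDefault(id):
    sendEncrypted(id, msg)  # in the real code, re-encoded with fresh keys
  pendingByNode.del(id)

when isMainModule:
  sendOrQueue(7, @[byte 1], haveKey = false)  # triggers the exchange
  sendOrQueue(7, @[byte 2], haveKey = false)  # parked
  onHandshakeDone(7)                          # flushes the parked message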
proc sendWhoareyou(t: Transport, toId: NodeId, a: Address, proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
requestNonce: AESGCMNonce, node: Option[Node]) = requestNonce: AESGCMNonce, node: Option[Node]) =
@ -92,16 +132,33 @@ proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce, let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
recordSeq, pubkey) recordSeq, pubkey)
sleepAsync(handshakeTimeout).addCallback() do(data: pointer): sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
# TODO: should we still provide cancellation in case handshake completes # The handshake key is popped in decodeHandshakePacket; if it has not been popped by the timeout:
# correctly? if t.codec.hasHandshake(key):
t.codec.handshakes.del(key) debug "Handshake timeout", myport = t.bindAddress.port, dstId = toId, address = a
t.codec.handshakes.del(key)
trace "Send whoareyou", dstId = toId, address = a trace "Send whoareyou", dstId = toId, address = a
t.sendToA(a, data) t.sendToA(a, data)
else: else:
debug "Node with this id already has ongoing handshake, ignoring packet" # TODO: is this reasonable to drop it? Should we allow a mini-queue here?
# Queue should be on sender side, as this is random encoded!
debug "Node with this id already has ongoing handshake, queuing packet", myport = t.bindAddress.port , dstId = toId, address = a
proc sendPending(t: Transport, toNode: Node):
Future[void] {.async.} =
if t.pendingRequestsByNode.hasKey(toNode.id):
trace "Found pending request", myport = t.bindAddress.port, src = toNode, len = t.pendingRequestsByNode[toNode.id].len
for message in t.pendingRequestsByNode[toNode.id]:
trace "Sending pending packet", myport = t.bindAddress.port, dstId = toNode.id
let address = toNode.address.get()
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec, toNode.id, address, message)
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.pendingRequestsByNode.del(toNode.id)
proc receive*(t: Transport, a: Address, packet: openArray[byte]) = proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
dht_transport_rx_packets.inc()
dht_transport_rx_bytes.inc(packet.len.int64)
let decoded = t.codec.decodePacket(a, packet) let decoded = t.codec.decodePacket(a, packet)
if decoded.isOk: if decoded.isOk:
let packet = decoded[] let packet = decoded[]
@ -109,20 +166,33 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
of OrdinaryMessage: of OrdinaryMessage:
if packet.messageOpt.isSome(): if packet.messageOpt.isSome():
let message = packet.messageOpt.get() let message = packet.messageOpt.get()
trace "Received message packet", srcId = packet.srcId, address = a, trace "Received message packet", myport = t.bindAddress.port, srcId = packet.srcId, address = a,
kind = message.kind, p = $packet kind = message.kind, p = $packet
t.client.handleMessage(packet.srcId, a, message) t.client.handleMessage(packet.srcId, a, message)
else: else:
trace "Not decryptable message packet received", trace "Not decryptable message packet received", myport = t.bindAddress.port,
srcId = packet.srcId, address = a srcId = packet.srcId, address = a
# If we already have a keyexchange in progress, we have a case of simultaneous cross-connect.
# We could try to decide here which should go on, but since we are on top of UDP, a more robust
# choice is to answer here and resolve conflicts in the next stage (reception of Whoareyou), or
# even later (reception of Handshake).
if packet.srcId in t.keyexchangeInProgress:
trace "cross-connect detected, still sending Whoareyou"
t.sendWhoareyou(packet.srcId, a, packet.requestNonce, t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
t.client.getNode(packet.srcId)) t.client.getNode(packet.srcId))
of Flag.Whoareyou: of Flag.Whoareyou:
trace "Received whoareyou packet", address = a trace "Received whoareyou packet", myport = t.bindAddress.port, address = a
var pr: PendingRequest var
if t.pendingRequests.take(packet.whoareyou.requestNonce, pr): prt: (PendingRequest, Moment)
let toNode = pr.node if t.pendingRequests.take(packet.whoareyou.requestNonce, prt):
let
pr = prt[0]
startTime = prt[1]
toNode = pr.node
rtt = Moment.now() - startTime
# trace "whoareyou RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
# This is a node we previously contacted and thus must have an address. # This is a node we previously contacted and thus must have an address.
doAssert(toNode.address.isSome()) doAssert(toNode.address.isSome())
let address = toNode.address.get() let address = toNode.address.get()
@ -136,12 +206,17 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
toNode.pubkey toNode.pubkey
).expect("Valid handshake packet to encode") ).expect("Valid handshake packet to encode")
trace "Send handshake message packet", dstId = toNode.id, address trace "Send handshake message packet", myport = t.bindAddress.port, dstId = toNode.id, address
t.send(toNode, data) t.send(toNode, data)
# keyexchange ready, we can send queued packets
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (finished)", myport = t.bindAddress.port, dstId = toNode.id, address
discard t.sendPending(toNode)
else: else:
debug "Timed out or unrequested whoareyou packet", address = a debug "Timed out or unrequested whoareyou packet", address = a
of HandshakeMessage: of HandshakeMessage:
trace "Received handshake message packet", srcId = packet.srcIdHs, trace "Received handshake message packet", myport = t.bindAddress.port, srcId = packet.srcIdHs,
address = a, kind = packet.message.kind address = a, kind = packet.message.kind
t.client.handleMessage(packet.srcIdHs, a, packet.message) t.client.handleMessage(packet.srcIdHs, a, packet.message)
# For a handshake message it is possible that we received an newer SPR. # For a handshake message it is possible that we received an newer SPR.
@ -155,28 +230,35 @@ proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
if node.address.isSome() and a == node.address.get(): if node.address.isSome() and a == node.address.get():
# TODO: maybe here we could verify that the address matches what we were # TODO: maybe here we could verify that the address matches what we were
# sending the 'whoareyou' message to. In that case, we can set 'seen' # sending the 'whoareyou' message to. In that case, we can set 'seen'
node.seen = true # TODO: verify how this works with restrictive NAT and firewall scenarios.
node.registerSeen()
if t.client.addNode(node): if t.client.addNode(node):
trace "Added new node to routing table after handshake", node trace "Added new node to routing table after handshake", node, tablesize=t.client.nodesDiscovered()
discard t.sendPending(node)
else:
trace "address mismatch, not adding seen flag", node, address = a, nodeAddress = node.address.get()
else: else:
trace "Packet decoding error", error = decoded.error, address = a dht_transport_rx_packets.inc(labelValues = ["failed_decode"])
dht_transport_rx_bytes.inc(packet.len.int64, labelValues = ["failed_decode"])
trace "Packet decoding error", myport = t.bindAddress.port, error = decoded.error, address = a
proc processClient[T](transp: DatagramTransport, raddr: TransportAddress): proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
Future[void] {.async.} = Future[void] {.async.} =
let t = getUserData[Transport[T]](transp) let t = getUserData[Transport[T]](transp)
# TODO: should we use `peekMessage()` to avoid allocation? # TODO: should we use `peekMessage()` to avoid allocation?
let buf = try: transp.getMessage() let buf = try:
except TransportOsError as e: transp.getMessage()
# This is likely to be local network connection issues. except TransportOsError as e:
warn "Transport getMessage", exception = e.name, msg = e.msg # This is likely to be local network connection issues.
return warn "Transport getMessage", exception = e.name, msg = e.msg
return
let ip = try: raddr.address() let ip = try: raddr.address()
except ValueError as e: except ValueError as e:
error "Not a valid IpAddress", exception = e.name, msg = e.msg error "Not a valid IpAddress", exception = e.name, msg = e.msg
return return
let a = Address(ip: ValidIpAddress.init(ip), port: raddr.port) let a = Address(ip: ip, port: raddr.port)
t.receive(a, buf) t.receive(a, buf)
@ -209,7 +291,7 @@ proc newTransport*[T](
Transport[T]( Transport[T](
client: client, client: client,
bindAddress: Address(ip: ValidIpAddress.init(bindIp), port: bindPort), bindAddress: Address(ip: bindIp, port: bindPort),
codec: Codec( codec: Codec(
localNode: localNode, localNode: localNode,
privKey: privKey, privKey: privKey,


@ -1,8 +1,6 @@
include "build.nims" switch("define", "libp2p_pki_schemes=secp256k1")
# begin Nimble config (version 2) # begin Nimble config (version 2)
--noNimblePath
when withDir(thisDir(), system.fileExists("nimble.paths")): when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths" include "nimble.paths"
# end Nimble config # end Nimble config


@ -1,335 +0,0 @@
{
"version": 2,
"packages": {
"nim": {
"version": "1.6.14",
"vcsRevision": "71ba2e7f3c5815d956b1ae0341b0743242b8fec6",
"url": "https://github.com/nim-lang/Nim.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f9ce6fa986a4e75514fe26d4c773789b8897eb18"
}
},
"unittest2": {
"version": "0.0.2",
"vcsRevision": "02c49b8a994dd3f9eddfaab45262f9b8fa507f8e",
"url": "https://github.com/status-im/nim-unittest2.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a7f3331cabb5fad0d04c93be0aad1f020f9c8033"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "e18f5a62af2ade7a1fd1d39635d4e04d944def08",
"url": "https://github.com/status-im/nim-stew.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "2a80972f66597bf87d820dca8164d89d3bb24c6d"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
"url": "https://github.com/cheatfate/nimcrypto.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
"url": "https://github.com/status-im/nim-secp256k1.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
}
},
"bearssl": {
"version": "0.1.5",
"vcsRevision": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab",
"url": "https://github.com/status-im/nim-bearssl.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "dabf4aaac8969fb10281ebd9ff51875d37eeaaa9"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "e88e231dfcef4585fe3b2fbd9b664dbd28a88040",
"url": "https://github.com/status-im/nim-http-utils.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "dd0dcef76616ad35922944671c49222c8a17fb1f"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686",
"url": "https://github.com/status-im/nim-chronos.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "8cdf821ecc76fb91fdfb5191cad31f813822fcb2"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5",
"url": "https://github.com/status-im/nim-metrics.git",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "6274c7ae424b871bc21ca3a6b6713971ff6a8095"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
"url": "https://github.com/status-im/nim-faststreams.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"testutils",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "493d18b8292fc03aa4f835fd825dea1183f97466",
"url": "https://github.com/status-im/nim-serialization.git",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "893921d41eb4e90a635442f02dd17b5f90bcbb00"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4",
"url": "https://github.com/status-im/nim-json-serialization.git",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "bdcdeefca4e2b31710a23cc817aa6abfa0d041e2"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "7631f7b2ee03398cb1512a79923264e8f9410af6",
"url": "https://github.com/status-im/nim-chronicles.git",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "2b6795cc40a687d3716b617e70d96e5af361c4af"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180",
"url": "https://github.com/status-im/nim-websock.git",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "d27f126527be59f5a0dc35303cb37b82d4e2770b"
}
},
"libp2p": {
"version": "1.0.0",
"vcsRevision": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702",
"url": "https://github.com/status-im/nim-libp2p.git",
"downloadMethod": "git",
"dependencies": [
"nimcrypto",
"dnsclient",
"bearssl",
"chronicles",
"chronos",
"metrics",
"secp256k1",
"stew",
"websock"
],
"checksums": {
"sha1": "65e473566f19f7f9a3529745e7181fb58d30b5ef"
}
},
"combparser": {
"version": "0.2.0",
"vcsRevision": "ba4464c005d7617c008e2ed2ebc1ba52feb469c6",
"url": "https://github.com/PMunch/combparser.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a3635260961a893b88f69aac19f1b24e032a7e97"
}
},
"asynctest": {
"version": "0.3.2",
"vcsRevision": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7",
"url": "https://github.com/status-im/asynctest.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "0ef50d086659835b0a23a4beb77cb11747695448"
}
},
"questionable": {
"version": "0.10.6",
"vcsRevision": "30e4184a99c8c1ba329925912d2c5d4b09acf8cc",
"url": "https://github.com/status-im/questionable.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "ca2d1e2e0be6566b4bf13261b29645721d01673d"
}
},
"upraises": {
"version": "0.1.0",
"vcsRevision": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2",
"url": "https://github.com/markspanbroek/upraises.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a0243c8039e12d547dbb2e9c73789c16bb8bc956"
}
},
"sqlite3_abi": {
"version": "3.40.1.1",
"vcsRevision": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "8e91db8156a82383d9c48f53b33e48f4e93077b1"
}
},
"protobuf_serialization": {
"version": "0.2.0",
"vcsRevision": "27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a",
"url": "https://github.com/status-im/nim-protobuf-serialization",
"downloadMethod": "git",
"dependencies": [
"stew",
"faststreams",
"serialization",
"combparser"
],
"checksums": {
"sha1": "9c30c45b92900b425b147aeceae87bee6295dd80"
}
},
"datastore": {
"version": "0.0.1",
"vcsRevision": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa",
"url": "https://github.com/status-im/nim-datastore",
"downloadMethod": "git",
"dependencies": [
"asynctest",
"chronos",
"questionable",
"sqlite3_abi",
"stew",
"unittest2",
"upraises"
],
"checksums": {
"sha1": "2c03bb47de97962d2a64be1ed0a8161cd9d65159"
}
},
"stint": {
"version": "0.0.1",
"vcsRevision": "036c71d06a6b22f8f967ba9d54afd2189c3872ca",
"url": "https://github.com/status-im/nim-stint",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "0f187a2115315ca898e5f9a30c5e506cf6057062"
}
}
},
"tasks": {}
}


@ -1,18 +1,32 @@
import std / [os, strutils, sequtils] import std / [os, strutils, sequtils]
switch("define", "libp2p_pki_schemes=secp256k1")
task testAll, "Run DHT tests": task testAll, "Run DHT tests":
exec "nim c -r tests/testAll.nim" exec "nim c -r test.nim"
rmFile "./test"
task compileParallelTests, "Compile parallel tests":
exec "nim c --hints:off --verbosity:0 dht/test_providers.nim"
exec "nim c --hints:off --verbosity:0 dht/test_providermngr.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim"
task test, "Run DHT tests": task test, "Run DHT tests":
exec "nim c -r -d:testsAll --verbosity:0 tests/testAllParallel.nim" # compile with trace logging to make sure it doesn't crash
exec "nim c -d:testsAll -d:chronicles_enabled=on -d:chronicles_log_level=TRACE test.nim"
rmFile "./test"
compileParallelTestsTask()
exec "nim c -r -d:testsAll --verbosity:0 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart1, "Run DHT tests A": task testPart1, "Run DHT tests A":
exec "nim c -r -d:testsPart1 tests/testAllParallel.nim" compileParallelTestsTask()
exec "nim c -r -d:testsPart1 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart2, "Run DHT tests B": task testPart2, "Run DHT tests B":
exec "nim c -r -d:testsPart2 tests/testAllParallel.nim" compileParallelTestsTask()
exec "nim c -r -d:testsPart2 testAllParallel.nim"
rmFile "./testAllParallel"
task coverage, "generates code coverage report": task coverage, "generates code coverage report":
var (output, exitCode) = gorgeEx("which lcov") var (output, exitCode) = gorgeEx("which lcov")
@ -45,7 +59,7 @@ task coverage, "generates code coverage report":
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell() if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== " echo "======== Running Tests ======== "
exec("nim c -r tests/coverage.nim") exec("nim c -r coverage.nim")
exec("rm nimcache/*.c") exec("rm nimcache/*.c")
rmDir("coverage"); mkDir("coverage") rmDir("coverage"); mkDir("coverage")
echo " ======== Running LCOV ======== " echo " ======== Running LCOV ======== "


@ -1,2 +0,0 @@
include ./testAll


@ -1,15 +0,0 @@
switch("define", "testsAll")
switch("debugger", "native")
switch("lineDir", "on")
switch("define", "debug")
# switch("opt", "none")
switch("verbosity", "0")
switch("hints", "off")
switch("warnings", "off")
switch("define", "chronicles_log_level=INFO")
switch("nimcache", "nimcache")
switch("passC", "-fprofile-arcs")
switch("passC", "-ftest-coverage")
switch("passL", "-fprofile-arcs")
switch("passL", "-ftest-coverage")


@ -1,20 +1,17 @@
import import
std/net,
bearssl/rand, bearssl/rand,
chronos, chronos,
libp2p/crypto/[crypto, secp], libp2p/crypto/[crypto, secp],
libp2p/multiaddress, libp2p/multiaddress,
codexdht/discv5/[node, routing_table, spr], codexdht/discv5/[node, routing_table, spr],
codexdht/discv5/crypto as dhtcrypto, codexdht/discv5/protocol as discv5_protocol
codexdht/discv5/protocol as discv5_protocol,
stew/shims/net
export net
proc localAddress*(port: int): Address = proc localAddress*(port: int): Address =
Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port)) Address(ip: IPv4_loopback(), port: Port(port))
proc example*(T: type PrivateKey, rng: ref HmacDrbgContext): PrivateKey = proc example*(T: type PrivateKey, rng: ref HmacDrbgContext): PrivateKey =
PrivateKey.random(rng[]).expect("Valid rng for private key") PrivateKey.random(PKScheme.Secp256k1, rng[]).expect("Valid rng for private key")
proc example*(T: type NodeId, rng: ref HmacDrbgContext): NodeId = proc example*(T: type NodeId, rng: ref HmacDrbgContext): NodeId =
let let
@ -54,7 +51,7 @@ proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool =
if id == n.id: return true if id == n.id: return true
proc generateNode*(privKey: PrivateKey, port: int, proc generateNode*(privKey: PrivateKey, port: int,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node = ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
let let
port = Port(port) port = Port(port)
@ -72,7 +69,7 @@ proc generateNRandomNodes*(rng: ref HmacDrbgContext, n: int): seq[Node] =
res res
proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32, proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): (Node, PrivateKey) = ip: IpAddress = parseIpAddress("127.0.0.1")): (Node, PrivateKey) =
while true: while true:
let let
privKey = PrivateKey.random(rng).expect("Valid rng for private key") privKey = PrivateKey.random(rng).expect("Valid rng for private key")
@ -81,37 +78,37 @@ proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
return (node, privKey) return (node, privKey)
proc nodeAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32, proc nodeAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node = ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
let (node, _) = n.nodeAndPrivKeyAtDistance(rng, d, ip) let (node, _) = n.nodeAndPrivKeyAtDistance(rng, d, ip)
node node
proc nodesAtDistance*( proc nodesAtDistance*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int, n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] = ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
for i in 0..<amount: for i in 0..<amount:
result.add(nodeAtDistance(n, rng, d, ip)) result.add(nodeAtDistance(n, rng, d, ip))
proc nodesAtDistanceUniqueIp*( proc nodesAtDistanceUniqueIp*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int, n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] = ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
var ta = initTAddress(ip, Port(0)) var ta = initTAddress(ip, Port(0))
for i in 0..<amount: for i in 0..<amount:
ta.inc() ta.inc()
result.add(nodeAtDistance(n, rng, d, ValidIpAddress.init(ta.address()))) result.add(nodeAtDistance(n, rng, d, ta.address()))
proc addSeenNode*(d: discv5_protocol.Protocol, n: Node): bool = proc addSeenNode*(d: discv5_protocol.Protocol, n: Node): bool =
# Add it as a seen node, warning: for testing convenience only! # Add it as a seen node, warning: for testing convenience only!
n.seen = true n.registerSeen()
d.addNode(n) d.addNode(n)
func udpExample*(_: type MultiAddress): MultiAddress = func udpExample*(_: type MultiAddress): MultiAddress =
## creates a new udp multiaddress on a random port ## creates a new udp MultiAddress on a random port
Multiaddress.init("/ip4/0.0.0.0/udp/0") MultiAddress.init("/ip4/0.0.0.0/udp/0")
func udpExamples*(_: type MultiAddress, count: int): seq[MultiAddress] = func udpExamples*(_: type MultiAddress, count: int): seq[MultiAddress] =
var res: seq[MultiAddress] = @[] var res: seq[MultiAddress] = @[]
for i in 1..count: for i in 1..count:
res.add Multiaddress.init("/ip4/0.0.0.0/udp/" & $i).get res.add MultiAddress.init("/ip4/0.0.0.0/udp/" & $i).get
return res return res
proc toSignedPeerRecord*(privKey: PrivateKey) : SignedPeerRecord = proc toSignedPeerRecord*(privKey: PrivateKey) : SignedPeerRecord =


@ -2,11 +2,10 @@
import std/sequtils import std/sequtils
import pkg/chronos import pkg/chronos
import pkg/asynctest import pkg/asynctest/chronos/unittest
import pkg/datastore import pkg/datastore
import pkg/libp2p from pkg/libp2p import PeerId
import codexdht/dht
import codexdht/private/eth/p2p/discoveryv5/spr import codexdht/private/eth/p2p/discoveryv5/spr
import codexdht/private/eth/p2p/discoveryv5/providers import codexdht/private/eth/p2p/discoveryv5/providers
import codexdht/discv5/node import codexdht/discv5/node
@ -101,10 +100,10 @@ suite "Test Providers Manager multiple":
not (await manager.contains(nodeIds[49])) not (await manager.contains(nodeIds[49]))
not (await manager.contains(nodeIds[99])) not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId": test "Should remove by PeerId with associated keys":
(await (manager.remove(providers[0].data.peerId))).tryGet (await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet (await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet (await (manager.remove(providers[9].data.peerId, true))).tryGet
for id in nodeIds: for id in nodeIds:
check: check:
@ -117,6 +116,22 @@ suite "Test Providers Manager multiple":
not (await manager.contains(providers[5].data.peerId)) not (await manager.contains(providers[5].data.peerId))
not (await manager.contains(providers[9].data.peerId)) not (await manager.contains(providers[9].data.peerId))
test "Should not return keys without provider":
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 10
for provider in providers:
(await (manager.remove(provider.data.peerId))).tryGet
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 0
for provider in providers:
check:
not (await manager.contains(provider.data.peerId))
suite "Test providers with cache": suite "Test providers with cache":
let let
rng = newRng() rng = newRng()
@ -165,9 +180,9 @@ suite "Test providers with cache":
not (await manager.contains(nodeIds[99])) not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId": test "Should remove by PeerId":
(await (manager.remove(providers[0].data.peerId))).tryGet (await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet (await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet (await (manager.remove(providers[9].data.peerId, true))).tryGet
for id in nodeIds: for id in nodeIds:
check: check:
@ -219,6 +234,24 @@ suite "Test Provider Maintenance":
for id in nodeIds: for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0 check: (await manager.get(id)).tryGet.len == 0
test "Should not cleanup unexpired":
let
unexpired = PrivateKey.example(rng).toSignedPeerRecord()
(await manager.add(nodeIds[0], unexpired, ttl = 1.minutes)).tryGet
await sleepAsync(500.millis)
await manager.store.cleanupExpired()
let
unexpiredProvs = (await manager.get(nodeIds[0])).tryGet
check:
unexpiredProvs.len == 1
await (unexpired.data.peerId in manager)
(await manager.remove(nodeIds[0])).tryGet
test "Should cleanup orphaned": test "Should cleanup orphaned":
for id in nodeIds: for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0 check: (await manager.get(id)).tryGet.len == 0


@ -10,18 +10,15 @@
{.used.} {.used.}
import import
std/[options, sequtils], std/[options],
asynctest, asynctest/chronos/unittest2,
bearssl/rand, bearssl/rand,
chronicles, chronicles,
chronos, chronos,
nimcrypto,
libp2p/crypto/[crypto, secp], libp2p/crypto/[crypto, secp],
libp2p/[multiaddress, multicodec, multihash, routing_record, signed_envelope], libp2p/[multiaddress, multicodec, multihash, routing_record, signed_envelope],
codexdht/dht,
codexdht/discv5/crypto as dhtcrypto, codexdht/discv5/crypto as dhtcrypto,
codexdht/discv5/protocol as discv5_protocol, codexdht/discv5/protocol as discv5_protocol,
stew/byteutils,
test_helper test_helper
proc bootstrapNodes( proc bootstrapNodes(
@ -59,7 +56,7 @@ proc bootstrapNetwork(
#waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above #waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above
var res = await bootstrapNodes(nodecount - 1, var res = await bootstrapNodes(nodecount - 1,
@[bootnode.localNode.record], @[bootNode.localNode.record],
rng, rng,
delay) delay)
res.insert((bootNode, bootNodeKey), 0) res.insert((bootNode, bootNodeKey), 0)
@ -125,7 +122,6 @@ suite "Providers Tests: node alone":
debug "Providers:", providers debug "Providers:", providers
check (providers.len == 0) check (providers.len == 0)
suite "Providers Tests: two nodes": suite "Providers Tests: two nodes":
var var


@@ -2,7 +2,7 @@
import
  std/tables,
-  chronos, chronicles, stint, asynctest, stew/shims/net,
+  chronos, chronicles, stint, asynctest/chronos/unittest,
  stew/byteutils, bearssl/rand,
  libp2p/crypto/crypto,
  codexdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
@@ -287,7 +287,7 @@ suite "Discovery v5 Tests":
    await mainNode.closeWait()
    await testNode.closeWait()

-  proc testLookupTargets(fast: bool = false) {.async.} =
+  proc testLookupTargets(fast: bool = false): Future[bool] {.async.} =
    const
      nodeCount = 17
@@ -306,9 +306,9 @@
      for t in nodes:
        if n != t:
          let pong = await n.ping(t.localNode)
-          check pong.isOk()
          if pong.isErr():
            echo pong.error
+            return false
          # check (await n.ping(t.localNode)).isOk()

    for i in 1 ..< nodeCount:
@@ -318,16 +318,19 @@
      let target = nodes[i]
      let discovered = await nodes[nodeCount-1].lookup(target.localNode.id, fast = fast)
      debug "Lookup result", target = target.localNode, discovered
-      check discovered[0] == target.localNode
+      if discovered[0] != target.localNode:
+        return false

    for node in nodes:
      await node.closeWait()
+    return true

  test "Lookup targets":
-    await testLookupTargets()
+    check await testLookupTargets()

  test "Lookup targets using traditional findNode":
-    await testLookupTargets(fast = true)
+    check await testLookupTargets(fast = true)
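
The refactor above moves assertions out of the helper: testLookupTargets now returns Future[bool] and the tests do `check await ...`, so a failure is recorded by the test runner inside the test block instead of being raised from a bare async proc. A self-contained sketch of the pattern (names here are illustrative, not from the codebase):

import asynctest/chronos/unittest, chronos

proc workSucceeds(): Future[bool] {.async.} =
  # Do the async work; report the outcome as a bool instead of
  # calling check inside the helper.
  await sleepAsync(1.millis)
  return true

test "pattern used by testLookupTargets":
  check await workSucceeds()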
test "Resolve target": test "Resolve target":
let let
@ -412,31 +415,37 @@ suite "Discovery v5 Tests":
await mainNode.closeWait() await mainNode.closeWait()
await lookupNode.closeWait() await lookupNode.closeWait()
# We no longer support field filtering test "Random nodes, also with filter":
# test "Random nodes with spr field filter": let
# let lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
# lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301)) targetNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20302))
# targetNode = generateNode(PrivateKey.example(rng)) otherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20303))
# otherNode = generateNode(PrivateKey.example(rng)) anotherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20304))
# anotherNode = generateNode(PrivateKey.example(rng))
# check: check:
# lookupNode.addNode(targetNode) lookupNode.addNode(targetNode.localNode.record)
# lookupNode.addNode(otherNode) lookupNode.addNode(otherNode.localNode.record)
# lookupNode.addNode(anotherNode) lookupNode.addNode(anotherNode.localNode.record)
# let discovered = lookupNode.randomNodes(10) let discovered = lookupNode.randomNodes(10)
# check discovered.len == 3 check discovered.len == 3
# let discoveredFiltered = lookupNode.randomNodes(10, let discoveredFiltered = lookupNode.randomNodes(10,
# ("test", @[byte 1,2,3,4])) proc(n: Node) : bool = n.address.get.port == Port(20302))
# check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode) check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode.localNode)
let discoveredEmpty = lookupNode.randomNodes(10,
proc(n: Node) : bool = n.address.get.port == Port(20305))
check discoveredEmpty.len == 0
await lookupNode.closeWait()
await targetNode.closeWait()
await otherNode.closeWait()
await anotherNode.closeWait()
# await lookupNode.closeWait()
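
The rewritten test shows the replacement for the removed SPR field filter: randomNodes now takes a plain predicate, proc(n: Node): bool, and returns only matching nodes. For instance, selecting by UDP port as the test does (lookupNode and the port numbers are the ones bound above):

# Sketch only: keep just the node bound to port 20302.
let onTargetPort = lookupNode.randomNodes(10,
  proc(n: Node): bool = n.address.get.port == Port(20302))
# A predicate matching nothing yields an empty seq, as the
# discoveredEmpty check above demonstrates.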
test "New protocol with spr": test "New protocol with spr":
let let
privKey = PrivateKey.example(rng) privKey = PrivateKey.example(rng)
ip = some(ValidIpAddress.init("127.0.0.1")) ip = some(parseIpAddress("127.0.0.1"))
port = Port(20301) port = Port(20301)
node = newProtocol(privKey, ip, some(port), some(port), bindPort = port, node = newProtocol(privKey, ip, some(port), some(port), bindPort = port,
rng = rng) rng = rng)
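
The ValidIpAddress.init to parseIpAddress substitutions running through the rest of this diff follow from dropping stew/shims/net: parseIpAddress is the Nim standard-library routine (std/net), returns a plain IpAddress, and raises ValueError on malformed input, so no Valid* wrapper type is needed. Standalone:

import std/net

let ip: IpAddress = parseIpAddress("127.0.0.1")
doAssert $ip == "127.0.0.1"
# parseIpAddress("not-an-ip") would raise ValueError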
@@ -531,7 +540,7 @@ suite "Discovery v5 Tests":
    let
      port = Port(9000)
      fromNoderecord = SignedPeerRecord.init(1, PrivateKey.example(rng),
-        some(ValidIpAddress.init("11.12.13.14")),
+        some(parseIpAddress("11.12.13.14")),
        some(port), some(port))[]
      fromNode = newNode(fromNoderecord)[]
      privKey = PrivateKey.example(rng)
@@ -543,7 +552,7 @@
    block: # Duplicates
      let
        record = SignedPeerRecord.init(
-          1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+          1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]

      # Exact duplicates
@@ -553,7 +562,7 @@
      # Node id duplicates
      let recordSameId = SignedPeerRecord.init(
-        1, privKey, some(ValidIpAddress.init("212.13.14.15")),
+        1, privKey, some(parseIpAddress("212.13.14.15")),
        some(port), some(port))[]
      records.add(recordSameId)
      nodes = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -562,7 +571,7 @@
    block: # No address
      let
        recordNoAddress = SignedPeerRecord.init(
-          1, privKey, none(ValidIpAddress), some(port), some(port))[]
+          1, privKey, none(IpAddress), some(port), some(port))[]
        records = [recordNoAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
      check test.len == 0
@@ -570,7 +579,7 @@
    block: # Invalid address - site local
      let
        recordInvalidAddress = SignedPeerRecord.init(
-          1, privKey, some(ValidIpAddress.init("10.1.2.3")),
+          1, privKey, some(parseIpAddress("10.1.2.3")),
          some(port), some(port))[]
        records = [recordInvalidAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -579,7 +588,7 @@
    block: # Invalid address - loopback
      let
        recordInvalidAddress = SignedPeerRecord.init(
-          1, privKey, some(ValidIpAddress.init("127.0.0.1")),
+          1, privKey, some(parseIpAddress("127.0.0.1")),
          some(port), some(port))[]
        records = [recordInvalidAddress]
        test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@@ -588,7 +597,7 @@
    block: # Invalid distance
      let
        recordInvalidDistance = SignedPeerRecord.init(
-          1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+          1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]
        records = [recordInvalidDistance]
        test = verifyNodesRecords(records, fromNode, limit, @[0'u16])
@@ -597,7 +606,7 @@
    block: # Invalid distance but distance validation is disabled
      let
        recordInvalidDistance = SignedPeerRecord.init(
-          1, privKey, some(ValidIpAddress.init("12.13.14.15")),
+          1, privKey, some(parseIpAddress("12.13.14.15")),
          some(port), some(port))[]
        records = [recordInvalidDistance]
        test = verifyNodesRecords(records, fromNode, limit)
@@ -624,12 +633,12 @@
    let
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly intialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")

    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))

-    let (packet, _) = encodeMessagePacket(rng[], codec,
+    let (packet, _, _) = encodeMessagePacket(rng[], codec,
      receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
    receiveNode.transport.receive(a, packet)
@@ -653,13 +662,13 @@
    let
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly intialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")

    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
    for i in 0 ..< 5:
      let a = localAddress(20303 + i)
-      let (packet, _) = encodeMessagePacket(rng[], codec,
+      let (packet, _, _) = encodeMessagePacket(rng[], codec,
        receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
      receiveNode.transport.receive(a, packet)
@@ -684,14 +693,14 @@
      a = localAddress(20303)
      privKey = PrivateKey.example(rng)
      enrRec = SignedPeerRecord.init(1, privKey,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9000)),
        some(Port(9000))).expect("Properly intialized private key")
      sendNode = newNode(enrRec).expect("Properly initialized record")

    var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))

    var firstRequestNonce: AESGCMNonce
    for i in 0 ..< 5:
-      let (packet, requestNonce) = encodeMessagePacket(rng[], codec,
+      let (packet, requestNonce, _) = encodeMessagePacket(rng[], codec,
        receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
      receiveNode.transport.receive(a, packet)
      if i == 0:
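
Every encodeMessagePacket call site in this diff grows from a 2-tuple to a 3-tuple result; what the third element carries is not visible here, so callers that don't need it discard it with `_`. The updated call shape, as used above (codec, rng, a and receiveNode set up as in the tests):

# Shape only, mirroring the call sites in this diff.
let (packet, requestNonce, _) = encodeMessagePacket(rng[], codec,
  receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)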


@@ -2,14 +2,13 @@
import
  std/[options, sequtils, tables],
-  asynctest/unittest2,
+  asynctest/chronos/unittest2,
  bearssl/rand,
  chronos,
  libp2p/crypto/secp,
  codexdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
  codexdht/discv5/crypto,
  stew/byteutils,
-  stew/shims/net,
  stint,
  ../dht/test_helper
@@ -275,11 +274,11 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
    let
      enrRecA = SignedPeerRecord.init(1, privKeyA,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly intialized private key")
      enrRecB = SignedPeerRecord.init(1, privKeyB,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly intialized private key")
      nodeA = newNode(enrRecA).expect("Properly initialized record")
@@ -508,11 +507,11 @@ suite "Discovery v5.1 Additional Encode/Decode":
    let
      enrRecA = SignedPeerRecord.init(1, privKeyA,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly intialized private key")
      enrRecB = SignedPeerRecord.init(1, privKeyB,
-        some(ValidIpAddress.init("127.0.0.1")), some(Port(9001)),
+        some(parseIpAddress("127.0.0.1")), some(Port(9001)),
        some(Port(9001))).expect("Properly intialized private key")
      nodeA = newNode(enrRecA).expect("Properly initialized record")
@@ -526,7 +525,7 @@
      reqId = RequestId.init(rng[])
      message = encodeMessage(m, reqId)

-    let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
+    let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
      nodeB.address.get(), message)

    let decoded = codecB.decodePacket(nodeA.address.get(), data)
@@ -642,7 +641,7 @@
    codecB.sessions.store(nodeA.id, nodeA.address.get(), secrets.initiatorKey,
      secrets.recipientKey)

-    let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
+    let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
      nodeB.address.get(), message)

    let decoded = codecB.decodePacket(nodeA.address.get(), data)

tests/test.nimble (new file)

@@ -0,0 +1,13 @@
+# Package
+
+version = "0.4.0"
+author = "Status Research & Development GmbH"
+description = "Tests for Logos Storage DHT"
+license = "MIT"
+installFiles = @["build.nims"]
+
+# Dependencies
+requires "asynctest >= 0.5.2 & < 0.6.0"
+requires "unittest2 <= 0.0.9"
+
+include "build.nims"


@@ -8,13 +8,13 @@ var cmds: seq[string]
when defined(testsPart1) or defined(testsAll):
  cmds.add [
-    "nim c -r tests/dht/test_providers.nim",
-    "nim c -r tests/dht/test_providermngr.nim",
+    "nim c -r --hints:off --verbosity:0 dht/test_providers.nim",
+    "nim c -r --hints:off --verbosity:0 dht/test_providermngr.nim",
  ]

when defined(testsPart2) or defined(testsAll):
  cmds.add [
-    "nim c -r tests/discv5/test_discoveryv5.nim",
-    "nim c -r tests/discv5/test_discoveryv5_encoding.nim",
+    "nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5.nim",
+    "nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim",
  ]

echo "Running Test Commands: ", cmds


@@ -1,2 +0,0 @@
-deps=""
-resolver="MaxVer"