Compare commits

...

96 Commits

Author SHA1 Message Date
Chrysostomos Nanakos
754765ba31
chore: orc support (#110)
Support ORC memory model (https://github.com/logos-storage/logos-storage-nim-dht/issues/109)

Signed-off-by: Chrysostomos Nanakos <chris@include.gr>
2025-12-23 23:23:31 +02:00
Arnaud
99884b5971
Rename Codex to Logos Storage (#108) 2025-12-15 13:46:04 +01:00
Jacek Sieka
6c7de03622
chore: bump stew et al (#107)
* fix use of deprecated imports
* bump stew
* `results` is its own package
* drop protobuf_serialization
* force leveldb version
2025-12-11 13:47:10 +01:00
Eric
f6eef1ac95
Merge pull request #104 from codex-storage/update-to-nim-2-x
Update to nim 2 x
2025-02-14 12:01:54 +11:00
Arnaud
fb17db8187
Update dependencies versions 2025-02-13 20:54:49 +01:00
Eric
d435c6945f
Merge pull request #105 from codex-storage/fix/deps/2.0-deps
fix(deps): remove deps pinned to commit hash
2025-02-13 15:25:50 +11:00
Eric
89d22c156e
bump nimcrypto 2025-02-13 15:07:51 +11:00
Eric
ee33946afb
bump nimcrypto 2025-02-13 12:19:43 +11:00
Eric
c777106e7f
set non-major range for remaining deps 2025-02-13 12:15:22 +11:00
Eric
14d4dd97e9
toBytes -> toBytesBE 2025-02-13 12:15:00 +11:00
Eric
a9e17f4a33
remove nim 1.6 from ci 2025-02-13 12:09:50 +11:00
Eric
bc27eebb85
fix pinned deps
Leaving nim-datastore as a commit hash until it has a relevant release tag
2025-02-13 12:08:09 +11:00
Ben
0f67d21bbc
updates nim-datastore 2025-02-10 11:25:40 +01:00
Arnaud
4bd3a39e00
Update to Nim 2.0.14 2025-01-07 10:51:55 +01:00
Arnaud
5f22be0420
Remove useless comment 2024-12-18 10:52:06 +01:00
Arnaud
4eb4e9126a
Use IpAddress instead of ValidAddress; remove unused import 2024-12-18 10:50:02 +01:00
Arnaud
5320e8c81e
Remove .lock and file and direct dependency to nim-results 2024-12-10 10:05:59 +01:00
Arnaud
cc54a4f0ec
Set dependencies versions and commit hashes and introduce nimble.lock 2024-12-09 18:57:41 +01:00
Arnaud
e7e45de75f
Nim 2 config auto generated 2024-12-09 18:57:03 +01:00
Arnaud
a3f203bbea
Add nimbledeps to gitignore 2024-12-09 18:56:18 +01:00
Arnaud
de39c2006e
Add Nim version 2.0.12 to CI matrix 2024-12-09 12:56:14 +01:00
Arnaud
cafb6ffe53
Update version 2024-12-09 12:55:24 +01:00
Arnaud
570fb9a936
Update dependencies 2024-12-09 12:48:03 +01:00
Arnaud
9fdf0eca8a
Add Nim 2.x specific configuration 2024-12-09 12:47:35 +01:00
Arnaud
d73dc48515
Add pragma for exception raises 2024-12-09 12:47:08 +01:00
Csaba Kiraly
57f4b6f7cb
Merge pull request #103 from codex-storage/fix-randomNodes
fix potential infinite loop in randomNodes
2024-10-18 20:45:49 +02:00
Csaba Kiraly
ee4e2102d9
Merge pull request #99 from codex-storage/fix-removal
add link reliability metrics, fix aggressive node removal on first packet loss
2024-10-18 20:14:23 +02:00
Csaba Kiraly
a6cfe1a084
fix potential infinite loop in randomNodes
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-17 12:38:54 +02:00
Csaba Kiraly
1a344f1fd7
log reliability based on loss statistics
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-15 18:17:49 +02:00
Csaba Kiraly
fee5a9ced2
set NoreplyRemoveThreshold to 0.5
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:35:15 +02:00
Csaba Kiraly
6310c50ce0
introduce NoreplyRemoveThreshold
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/protocol.nim
2024-10-14 15:35:10 +02:00
Csaba Kiraly
7507e99c96
register "not seen" when missing replies
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:33:34 +02:00
Csaba Kiraly
02bc12e639
change node seen flag to an exponential moving average
keep defaults as before

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/node.nim
#	codexdht/private/eth/p2p/discoveryv5/routing_table.nim
2024-10-14 15:33:29 +02:00
Csaba Kiraly
e1c1089e4f
fix aggressive node removal on first packet loss
UDP packets get lost easily. We can't just remove
nodes from the routing table at first loss, as it can
create issues in small networks and in cases of temporary
connection failures.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 15:28:09 +02:00
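The commits above replace the boolean `seen` flag with an exponential moving average and only drop a node once that average falls below `NoreplyRemoveThreshold`. A minimal, self-contained Nim sketch of that idea (the constant values match the diff further down; the routing-table plumbing is omitted):

```nim
const
  SeenSmoothingFactor = 0.9
  NoreplyRemoveThreshold = 0.5

type
  TrackedNode = object
    seen: float   # 0.0 = never contacted; otherwise a smoothed reliability estimate

proc registerSeen(n: var TrackedNode, seen: bool) =
  if n.seen == 0:
    n.seen = 1.0  # first successful contact
  else:
    # exponential moving average over hits (true) and misses (false)
    n.seen = SeenSmoothingFactor * n.seen +
             (1.0 - SeenSmoothingFactor) * seen.float

proc shouldRemove(n: TrackedNode): bool =
  # only remove after repeated losses, never on the first missed reply
  n.seen > 0 and n.seen < NoreplyRemoveThreshold

when isMainModule:
  var n = TrackedNode()
  n.registerSeen(true)        # seen = 1.0
  for _ in 0 ..< 8:
    n.registerSeen(false)     # a run of missed replies decays the average
  echo n.seen, " remove: ", n.shouldRemove()
```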
Csaba Kiraly
c1d2ea410d
Merge pull request #102 from codex-storage/measure-rtt-bw
Measure rtt, estimate bw, and log every 5 minutes
2024-10-14 14:19:35 +02:00
Csaba Kiraly
8b1660464d
don't log bandwidth estimates
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 13:57:52 +02:00
Csaba Kiraly
7057663f81
fixup: remove excessive debug
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-14 11:19:36 +02:00
Csaba Kiraly
ff5391a35e
Merge pull request #100 from codex-storage/metrics
rename and add more dht metrics
2024-10-10 12:51:09 +02:00
Csaba Kiraly
4ccaaee721
rename metrics to dht_ from discovery_
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:44:26 +02:00
Csaba Kiraly
80cc069c5e
metrics: add transport byte counters
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:43:23 +02:00
Csaba Kiraly
ffeeeeb3fb
transport: add metrics
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-10 11:42:11 +02:00
Csaba Kiraly
4d2250477e
metrics: add discovery_routing_table_buckets
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-10 11:40:45 +02:00
Csaba Kiraly
b7b04ed9e4
metrics: rename routing_table_nodes to discovery_routing_table_nodes
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-10 11:40:44 +02:00
Csaba Kiraly
6e180af4aa
Merge pull request #101 from codex-storage/logging
Logging updates
2024-10-10 11:22:23 +02:00
Csaba Kiraly
706cb50041
add debugPrintLoop to print neighborhood info
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:31:06 +02:00
Csaba Kiraly
0825d887ea
add bandwidth estimate
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:31:06 +02:00
Csaba Kiraly
ec4f0d4a84
add transport level RTT measurement
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:18:02 +02:00
Csaba Kiraly
0b69de242f
add rtt measurement
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:17:58 +02:00
Csaba Kiraly
f3eec2a202
node: add RTT and bandwidth measurement holders
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:17:29 +02:00
Csaba Kiraly
f6971cc947
logging: better logging of SPR update
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-10-08 11:15:50 +02:00
Csaba Kiraly
4d9e39d86c
transport: improve logging
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

# Conflicts:
#	codexdht/private/eth/p2p/discoveryv5/transport.nim
2024-10-08 11:15:20 +02:00
Csaba Kiraly
b8bcb2d08d
Merge pull request #95 from codex-storage/factorize
Factorize code
2024-10-07 14:06:59 +02:00
Csaba Kiraly
f121d080e7
Merge pull request #96 from codex-storage/reduce-timeouts
Reduce timeouts
2024-10-03 10:54:44 +02:00
Csaba Kiraly
fef297c622
Merge pull request #94 from codex-storage/feature-FindNodeFastResultLimit
Add separate limit for results returned in FindNodeFast
2024-10-01 15:04:26 +02:00
Csaba Kiraly
936a5ec6fa
Merge pull request #93 from codex-storage/fix-FindNodeResultLimit
fix returning too many nodes when FindNodeResultLimit!=BUCKET_SIZE
2024-10-01 14:51:33 +02:00
Ben Bierens
9acdca795b
routing table logging update (#97)
* Clear logs for adding and removing of nodes. routingtable log topic for filtering.

* Makes node ID shortening consistent with other short-id formats

* redundant else block

* fixes dependencies
2024-09-23 15:49:08 +02:00
Ben Bierens
5f38fd9570
GCC-14 (#98)
* bumps bearssl

* updates version of bearssl in lockfiles

* fixes that checksum

* attempt to bump various dependencies

* updates asynctest version tag

* asynctest sha

* bumps to working version of nim-datastore

* adjusts asynctest imports for chronos

* chronos checksum

* checksum for datastore

* libp2p version tag

* libp2p checksum

* moves libp2p from codex-branch to latest master

* libp2p checksum

* splits the test dependencies from the dev dependencies (example nim-ethers)

* sets path

* pathing in tests

* oops wrong version

* adds build.nims to installfiles for test module

* attempt to fix import paths

* bumps nim-datastore

* datastore checksum

* greatly simplify CI

* fixes asynctest import

* builds parallel tests before running

* bumps datastore

* turns nim-stable back off

* pins nim-datastore version

* bumps checkout to v4

* Review comment by Mark

Co-authored-by: markspanbroek <mark@spanbroek.net>

* Review comment by Mark

Co-authored-by: markspanbroek <mark@spanbroek.net>

---------

Co-authored-by: markspanbroek <mark@spanbroek.net>
2024-08-20 11:04:48 +02:00
Csaba Kiraly
5624700855
reduce default timeouts
We really don't need these to be 2 and 4 seconds.
Later we should tune it better based on measurements
or estimates. We should also check the relation between
these three values.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:34:10 +02:00
Csaba Kiraly
76da855725
use handshakeTimeout if handshake starting in sendMessage
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:20:20 +02:00
Csaba Kiraly
4c9c92232b
remove unused sendRequest call
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:17 +02:00
Csaba Kiraly
148b10908d
trace log: do not log binary encoding
Even at trace level this feels too much.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:13 +02:00
Csaba Kiraly
f299c23e2e
remove lookupWorkerFast duplicate code
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:14:03 +02:00
Csaba Kiraly
bdf57381e3
introduce FindNodeFastResultLimit
We do not need that many responses with FindNodeFast, since the
responses can be ordered by distance

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 04:06:43 +02:00
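A small illustrative sketch of the reasoning above, assuming a hypothetical limit of 5: because FindNodeFast replies can be sorted by XOR distance to the target, only the closest few candidates need to be returned rather than a full bucket's worth.

```nim
import std/algorithm

const FindNodeFastResultLimit = 5   # assumed value, smaller than a full bucket

proc closestTo(target: int, ids: seq[int]): seq[int] =
  ## candidates sorted by XOR distance to `target`, capped at the limit
  var ordered = ids
  ordered.sort(proc (a, b: int): int = cmp(a xor target, b xor target))
  ordered[0 ..< min(FindNodeFastResultLimit, ordered.len)]

when isMainModule:
  echo closestTo(10, @[1, 8, 15, 6, 11, 3])   # prints the 5 closest candidates
```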
Csaba Kiraly
4b82bdc2f9
fix returning too many nodes when FindNodeResultLimit!=BUCKET_SIZE
Code assumed these two values to be the same, resulting in
reception errors.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-07-01 03:55:03 +02:00
Csaba Kiraly
d8160ff0f7
add logging helper for Protocol
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:39:13 +02:00
Csaba Kiraly
f766cb39b1
encoding: introducing type cipher=aes128
Introducing the cipher type to ease changing cipher.
No functional change

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:37:26 +02:00
Csaba Kiraly
316464fc71
dht: waitMessage: expose timeout as parameter, keeping default
defaults to ResponseTimeout as before

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:35:29 +02:00
Csaba Kiraly
6e61e02091
fixup: move sendRequest forward
Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:34:49 +02:00
Csaba Kiraly
dfff39091b
introduce waitResponse wrapper
initialize wait for response before sending request.
This is needed in cases where the response arrives before
moving to the next instruction, such as a directly connected
test.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2024-06-28 17:33:56 +02:00
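A conceptual chronos sketch of the race described above (not the project's actual API): the response future is created before the request goes out, so a reply that arrives immediately, as in a directly connected test, still has a waiter.

```nim
import chronos

proc demo() {.async.} =
  # 1. register the waiter before anything is sent
  let responseFut = newFuture[string]("response")
  # 2. "send" the request; in a directly connected test the reply may be
  #    processed before execution reaches the await below
  responseFut.complete("pong")
  # 3. the reply is not lost, because the future already existed
  echo await responseFut

waitFor demo()
```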
Giuliano Mega
63822e8356
Update nim-codex-dht to Chronos V4 (#90)
Update nim-codex-dht to Chronos v4
2024-05-23 17:49:44 -03:00
Giuliano Mega
2299317116
Merge pull request #91 from codex-storage/chore/update-libp2p-repo
Update repo for libp2p
2024-03-18 19:40:01 -03:00
gmega
717cd0a50c
Merge branch 'master' into chore/update-libp2p-repo 2024-03-18 19:11:57 -03:00
Giuliano Mega
223ce9240b
Merge pull request #92 from codex-storage/fix/nimble-archive-osx
fix name for nimble archive for OSX
2024-03-18 19:11:28 -03:00
gmega
709a873862
fix name for nimble archive for OSX 2024-03-18 18:48:04 -03:00
gmega
b3d01245e9
update repo for libp2p 2024-03-18 18:42:48 -03:00
Dmitriy Ryajov
beefafcc6f
Update CleanupInterval to 24 hours (#88) 2023-11-21 17:14:15 -08:00
Dmitriy Ryajov
a7f14bc9b7
Fix logging format (#87)
* add shortLog for Address

* compile with trace logging to catch errors
2023-11-20 09:34:40 -08:00
Dmitriy Ryajov
dd4985435a
Fix timeout and delete (#86)
* use unix time for ttl

* don't remove all entries on peer removal

* cleanup questionable tuple destructure

* ignore vscode

* fix endians decoding

* allow removing by peerId

* invalidate cache by peerId on remove

* update test
2023-11-17 14:01:16 -08:00
Csaba Kiraly
91b2eaec89
Fix: arrive to working keys in case of simultaneous cross connect (#84)
* improve tracing of message exchange

run e.g. as
```
nim c -r -d:debug -d:chronicles_enabled=on -d:chronicles_log_level=TRACE -d:chronicles_sinks=textlines[nocolors,stdout] tests/dht/test_providers.nim >err
```

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* add debug on Handshake timeout

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* queue messages during handshake and send later

If a handshake was already in progress, messages were dropped.
Instead of this, it is better to queue these and send as soon
as the handshake is finished and thus the encryption key is known.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* rename handshakeInProgress to keyexchangeInProgress

Handshake is also a name of a message, which makes previous
name less clear.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* keyexchangeInProgress: do not remove on handshake received

This is the wrong direction, not needed

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* fix cross-connect key exchange

Since key exchange can be started both ways simultaneously, and
these might not get finalised with UDP transport, we can't be
sure what encryption key will be used by the other side:
- the one derived in the key-exchange started by us,
- the one derived in the key-exchange started by the other node.
To alleviate this issue, we store two decryption keys in each session.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

---------

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2023-11-17 11:50:28 -08:00
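A hypothetical sketch of the dual-key idea from the last bullet above: each session keeps both candidate decryption keys and falls back to the second when the first fails. The decryption stand-in below is illustrative only; the real codec uses AES-GCM, as shown in the diff further down.

```nim
import std/options

type
  AesKey = array[16, byte]
  Session = object
    readKeyA: AesKey   # key derived from the exchange we initiated
    readKeyB: AesKey   # key derived from the exchange the peer initiated

# stand-in for AES-GCM decryption: succeeds only when the matching key is used
proc tryDecrypt(key, senderKey: AesKey, ct: seq[byte]): Option[seq[byte]] =
  if key == senderKey: some(ct) else: none(seq[byte])

proc decode(s: Session, senderKey: AesKey, ct: seq[byte]): Option[seq[byte]] =
  # try one candidate key first, then fall back to the other instead of
  # dropping the session, since we cannot know which key exchange "won"
  result = tryDecrypt(s.readKeyB, senderKey, ct)
  if result.isNone:
    result = tryDecrypt(s.readKeyA, senderKey, ct)
```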
Csaba Kiraly
66116b9bf6
Fix: queue messages when there is no encryption key (#83)
* encodeMessagePacket: expose haskey

encodeMessagePacket checks for session and behaves differently
based on that. Exposing this difference in behavior.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* improve tracing of message exchange

run e.g. as
```
nim c -r -d:debug -d:chronicles_enabled=on -d:chronicles_log_level=TRACE -d:chronicles_sinks=textlines[nocolors,stdout] tests/dht/test_providers.nim >err
```

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* add debug on Handshake timeout

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* queue messages during handshake and send later

If a handshake was already in progress, messages were dropped.
Instead of this, it is better to queue these and send as soon
as the handshake is finished and thus the encryption key is known.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* rename handshakeInProgress to keyexchangeInProgress

Handshake is also a name of a message, which makes previous
name less clear.

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

* keyexchangeInProgress: do not remove on handshake received

This is the wrong direction, not needed

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>

---------

Signed-off-by: Csaba Kiraly <csaba.kiraly@gmail.com>
2023-11-17 11:18:48 -08:00
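A minimal sketch, with illustrative names only, of the queuing behaviour described in this pull request: while a key exchange is in flight, outgoing messages are buffered rather than dropped, and flushed once the handshake completes and the encryption key is known.

```nim
import std/[deques, sets, tables]

type Endpoint = string

var
  keyexchangeInProgress = initHashSet[Endpoint]()
  pending = initTable[Endpoint, Deque[seq[byte]]]()

proc sendEncrypted(to: Endpoint, msg: seq[byte]) =
  echo "sending ", msg.len, " bytes to ", to   # stand-in for the real transport

proc send(to: Endpoint, msg: seq[byte]) =
  if to in keyexchangeInProgress:
    # no session key yet: queue the message instead of dropping it
    pending.mgetOrPut(to, initDeque[seq[byte]]()).addLast(msg)
  else:
    sendEncrypted(to, msg)

proc onKeyExchangeDone(to: Endpoint) =
  keyexchangeInProgress.excl(to)
  # flush everything queued while the handshake was in flight
  if to in pending:
    while pending[to].len > 0:
      sendEncrypted(to, pending[to].popFirst())
    pending.del(to)
```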
Jaremy Creechley
60dc4e764c
Fix changes from stint (#81)
* some formatting tweaks to make errors easier to grok

* stint removed overloads for regular ints - use stew versions instead

* various name style fixes

* ignore vscode stuff

* revert style changes

* revert unneeded var rename changes

---------

Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
2023-11-17 11:08:45 -08:00
Dmitriy Ryajov
ee5d8acb05
cleanup and avoid lockups (#85) 2023-09-20 09:20:26 -07:00
Slava
ed7caa119d
Update CI workflow trigger branch (#82)
* Update CI workflow trigger branch

* Update Codecov workflow trigger branch and badges
2023-08-26 09:31:31 +03:00
Jaremy Creechley
fdd02450aa
bump release (#79) 2023-07-25 19:56:25 -07:00
Jaremy Creechley
b585290397
Bump deps (#77)
Updates all Status IM and Codex Storage deps to the latest except for nim-stint which is held back due to some compiler issue. 

* fix nimble name
* don't override nimble
* update all deps
* import nimble.lock and fix urls
* don't forget nim
* bump to chronos with async notifications
2023-07-21 15:29:38 -07:00
Dmitriy Ryajov
9ae0bfb1c3
Fix nimble install (#78)
* make encryption scheme explicit

* supress compiler noise

* make `nimble install` without `-d` work

* move `libp2p_pki_schemes=secp256k1` to config.nims

* fix include
2023-07-21 15:51:42 -06:00
Jaremy Creechley
1f27eb4aff
Upgrade secp256k1 deps (#76)
* import full secret hash from upstream
* use full secret hash including prefix byte 0x02 / 0x03
* import nimble lock
* fix atlas lock
* update stint
* cleanup urls
* bump lock files
* match lockfiles
2023-07-19 17:04:28 -07:00
Jaremy Creechley
fc7d7ef80c
version bump - stable ci version 2023-07-17 15:34:59 -07:00
Ivan Yonchovski
ae844ec4c5
Restore coverage (#74)
* Restore coverage
* use nimble setup for now
* Update codecov.yml
* fix lcov
* re-add nim binary to lockfile

---------

Co-authored-by: Jaremy Creechley <creechley@gmail.com>
2023-07-17 15:32:55 -07:00
Jaremy Creechley
d4331f8062
Remove nimbus build (#73)
* Remove Nimbus
* adds docs for Nimble 0.14
* cleanup
2023-07-17 12:43:14 -07:00
Jaremy Creechley
7464c8e9a6
Split windows tests (#70)
Changes the CI to properly cache Nimble deps *including* Nim binaries. 

* parallelize tests
* speed up tests
* cache whole nimble
* use nimble install without -d
* bump version
* new nimble cache
* fix github_env
* compare speed
* readd msys2
* don't need make for nimble
* ugh renames
2023-07-13 20:42:43 -07:00
Jaremy Creechley
18dbe4cc04
Merge pull request #69 from codex-storage/rename-library
Finish renaming library
2023-07-12 16:13:13 -07:00
Jaremy Creechley
127d7be4e5
bump version 2023-07-12 15:47:16 -07:00
Jaremy Creechley
d7cc0ae783
renaming imports 2023-07-12 14:58:29 -07:00
Jaremy Creechley
c3d073c3da
finishing renaming 2023-07-12 14:57:26 -07:00
Jaremy Creechley
96515d6d45
Merge pull request #67 from codex-storage/nimble-build-light
* import nimbus build tools
* add nimbus build
* updates
* add nat traversal to make nimbus happy
* clone nimbus vendor using atlas
* disable ci-nimbus
* only run ci-nimbus on lockfile change
* add nim setup
* caching - change names
* restore names
* cleanup & test
* bump version
2023-07-12 14:53:52 -07:00
62 changed files with 1349 additions and 1814 deletions

View File

@ -1,42 +0,0 @@
name: Install Nimble
description: install nimble
inputs:
nimble_version:
description: "install nimble"
# TODO: make sure to change to tagged release when available
default: "latest"
os:
description: "operating system"
default: "linux"
cpu:
description: "cpu architecture"
default: "amd64"
runs:
using: "composite"
steps:
- uses: actions/checkout@v3
- name: Build Nimble
shell: bash
run: |
set -x
mkdir -p .nimble
cd .nimble
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
CPU=x64
elif [[ '${{ inputs.cpu }}' == 'i386' ]]; then
CPU=x32
else
CPU=${{ inputs.cpu }}
fi
if [[ '${{ inputs.os }}' == 'macos' ]]; then
OS=apple
else
OS='${{ inputs.os }}'
fi
URL=https://github.com/nim-lang/nimble/releases/download/${{ inputs.nimble_version }}/nimble-"$OS"_"$CPU".tar.gz
curl -o nimble.tar.gz -L -s -S "$URL"
tar -xvf nimble.tar.gz
- name: Derive environment variables
shell: bash
run: echo '${{ github.workspace }}/.nimble/' >> $GITHUB_PATH

View File

@ -1,173 +0,0 @@
name: CI-nimbus
on:
push:
paths:
- atlas.lock
- .github/workflows/ci-nimbus.yml
jobs:
build:
timeout-minutes: 90
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
# - os: linux
# cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 1.6.14
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v3
with:
path: NimBinaries
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-nim-${{ hashFiles('atlas.lock') }}
- name: Restore Vendor Clones from cache
id: vendor-cache
uses: actions/cache@v3
with:
path: vendor/*/
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-vendor-${{ hashFiles('atlas.lock') }}
- name: Run tests
run: |
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
# https://github.com/status-im/nimbus-eth2/issues/3121
export NIMFLAGS="-d:nimRawSetjmp"
fi
echo "BUILD: "
export NIM_COMMIT=${{ matrix.branch }}
make -j${ncpu} CI_CACHE=NimBinaries ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1
make test -j${ncpu}

View File

@ -1,170 +1,22 @@
name: CI
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
on: [push, pull_request]
jobs:
build:
timeout-minutes: 90
test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
nim: [2.2.4]
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Restore nimble dependencies from cache
id: nimble_deps
uses: actions/cache@v3
with:
path: |
~/.nimble/pkgs2
~/.nimble/packages_official.json
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-nimble-${{ hashFiles('nimble.lock') }}
- name: Setup Nimble
uses: "./.github/actions/install_nimble"
with:
os: ${{ matrix.target.os }}
cpu: ${{ matrix.target.cpu }}
- name: Run tests
run: |
rm -rf ~/.nimble/
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
# https://github.com/status-im/nimbus-eth2/issues/3121
export NIMFLAGS="-d:nimRawSetjmp"
fi
nimble test -y
if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
export NIMFLAGS="${NIMFLAGS} --gc:orc"
nimble test -y
fi;
- name: Checkout
uses: actions/checkout@v4
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: ${{matrix.nim}}
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build
run: nimble install -y
- name: Test
run: nimble test -y

6
.gitignore vendored
View File

@ -1,3 +1,6 @@
*
!*.*
!*/
coverage
nimcache
tests/testAll
@ -8,3 +11,6 @@ nimbus-build-system.paths
vendor/*
NimBinaries
.update.timestamp
*.dSYM
.vscode/*
nimbledeps

View File

@ -1,71 +0,0 @@
# Copyright (c) 2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
SHELL := bash # the shell used internally by Make
# used inside the included makefiles
BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics
# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker
DOCKER_IMAGE_NIM_PARAMS ?= -d:chronicles_colors:none -d:insecure
LINK_PCRE := 0
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
.PHONY: \
all \
clean \
coverage \
deps \
libbacktrace \
test \
update
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
GIT_SUBMODULE_UPDATE := nimble install https://github.com/elcritch/atlas && atlas rep --noexec atlas.lock
.DEFAULT:
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
$(GIT_SUBMODULE_UPDATE); \
echo
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
#
# After restarting, it will execute its original goal, so we don't have to start a child Make here
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
else # "variables.mk" was included. Business as usual until the end of this file.
# default target, because it's the first one that doesn't start with '.'
# Builds the codex binary
all: | build deps
echo -e $(BUILD_MSG) "$@" && \
$(ENV_SCRIPT) nim test $(NIM_PARAMS)
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
deps: | deps-common nat-libs
#- deletes and recreates "codexdht.nims" which on Windows is a copy instead of a proper symlink
update: | update-common
rm -rf codexdht.nims && \
$(MAKE) codexdht.nims $(HANDLE_OUTPUT)
# Builds and run a part of the test suite
test: | build deps
echo -e $(BUILD_MSG) "$@" && \
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) config.nims
# usual cleaning
clean: | clean-common
endif # "variables.mk" was not included

View File

@ -1,12 +1,12 @@
# A DHT implementation for Codex
# A DHT implementation for Logos Storage
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI (GitHub Actions)](https://github.com/status-im/nim-libp2p-dht/workflows/CI/badge.svg?branch=main)](https://github.com/status-im/nim-libp2p-dht/actions?query=workflow%3ACI+branch%3Amain)
[![codecov](https://codecov.io/gh/status-im/nim-libp2p-dht/branch/main/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/status-im/nim-libp2p-dht)
[![CI (GitHub Actions)](https://github.com/logos-storage/logos-storage-nim-dht/workflows/CI/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim-dht/actions/workflows/ci.yml?query=workflow%3ACI+branch%3Amaster)
[![codecov](https://codecov.io/gh/logos-storage/logos-storage-nim-dht/branch/master/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/logos-storage/logos-storage-nim-dht)
This DHT implementation is aiming to provide a DHT for Codex with the following properties
This DHT implementation is aiming to provide a DHT for Logos Storage with the following properties
* flexible secure transport usage with
* fast UDP based operation
* eventual fallback to TCP-based operation (maybe though libp2p)
@ -19,3 +19,26 @@ This DHT implementation is aiming to provide a DHT for Codex with the following
Current implementation is based on nim-eth's Discovery v5 implementation.
Base files were copied from [`status-im/nim-eth@779d767b024175a51cf74c79ec7513301ebe2f46`](https://github.com/status-im/nim-eth/commit/779d767b024175a51cf74c79ec7513301ebe2f46)
## Building
This repo is set up to use Nimble lockfiles. This requires Nimble 0.14+, which wasn't installed by default when this was written. If `nimble -v` reports `0.13.x`, you will need to install Nimble 0.14. Note that using Nimble 0.14 changes how Nimble behaves!
Nimble 0.14 can be installed by:
```sh
nimble install nimble@0.14.2
```
After this you can set up your Nimble environment. Note that this will build the pinned version of Nim! The first run can take ~15 minutes.
```sh
nimble setup # creates a nimble.paths used for rest of Nimble commands
nimble testAll
```
You can also run tasks directly:
```sh
nim testAll
```

View File

@ -1,143 +0,0 @@
{
"items": {
"nimbus-build-system": {
"dir": "vendor/nimbus-build-system",
"url": "https://github.com/status-im/nimbus-build-system",
"commit": "239c3a7fbb88fd241da0ade3246fd2e5fcff4f25"
},
"nim-nat-traversal": {
"dir": "vendor/nim-nat-traversal",
"url": "https://github.com/status-im/nim-nat-traversal",
"commit": "802d75edcc656e616120fb27f950ff1285ddcbba"
},
"nim-zlib": {
"dir": "vendor/nim-zlib",
"url": "https://github.com/status-im/nim-zlib",
"commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93"
},
"nim-stew": {
"dir": "vendor/nim-stew",
"url": "https://github.com/status-im/nim-stew.git",
"commit": "e18f5a62af2ade7a1fd1d39635d4e04d944def08"
},
"nim-http-utils": {
"dir": "vendor/nim-http-utils",
"url": "https://github.com/status-im/nim-http-utils.git",
"commit": "3b491a40c60aad9e8d3407443f46f62511e63b18"
},
"nim-chronos": {
"dir": "vendor/nim-chronos",
"url": "https://github.com/status-im/nim-chronos.git",
"commit": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686"
},
"upraises": {
"dir": "vendor/upraises",
"url": "https://github.com/markspanbroek/upraises.git",
"commit": "bc2628989b63854d980e92dadbd58f83e34b6f25"
},
"nim-sqlite3-abi": {
"dir": "vendor/nim-sqlite3-abi",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi.git",
"commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3"
},
"questionable": {
"dir": "vendor/questionable",
"url": "https://github.com/status-im/questionable.git",
"commit": "0d7ce8efdedaf184680cb7268721fca0af947a74"
},
"nim-websock": {
"dir": "vendor/nim-websock",
"url": "https://github.com/status-im/nim-websock.git",
"commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8"
},
"nim-secp256k1": {
"dir": "vendor/nim-secp256k1",
"url": "https://github.com/status-im/nim-secp256k1.git",
"commit": "5340cf188168d6afcafc8023770d880f067c0b2f"
},
"nim-bearssl": {
"dir": "vendor/nim-bearssl",
"url": "https://github.com/status-im/nim-bearssl.git",
"commit": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab"
},
"dnsclient.nim": {
"dir": "vendor/dnsclient.nim",
"url": "https://github.com/ba0f3/dnsclient.nim",
"commit": "23214235d4784d24aceed99bbfe153379ea557c8"
},
"nimcrypto": {
"dir": "vendor/nimcrypto",
"url": "https://github.com/status-im/nimcrypto.git",
"commit": "a5742a9a214ac33f91615f3862c7b099aec43b00"
},
"nim-json-serialization": {
"dir": "vendor/nim-json-serialization",
"url": "https://github.com/status-im/nim-json-serialization.git",
"commit": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4"
},
"nim-testutils": {
"dir": "vendor/nim-testutils",
"url": "https://github.com/status-im/nim-testutils",
"commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815"
},
"nim-unittest2": {
"dir": "vendor/nim-unittest2",
"url": "https://github.com/status-im/nim-unittest2.git",
"commit": "b178f47527074964f76c395ad0dfc81cf118f379"
},
"npeg": {
"dir": "vendor/npeg",
"url": "https://github.com/zevv/npeg",
"commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f"
},
"nim-serialization": {
"dir": "vendor/nim-serialization",
"url": "https://github.com/status-im/nim-serialization.git",
"commit": "493d18b8292fc03aa4f835fd825dea1183f97466"
},
"nim-faststreams": {
"dir": "vendor/nim-faststreams",
"url": "https://github.com/status-im/nim-faststreams.git",
"commit": "1b561a9e71b6bdad1c1cdff753418906037e9d09"
},
"nim-datastore": {
"dir": "vendor/nim-datastore",
"url": "https://github.com/codex-storage/nim-datastore.git",
"commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa"
},
"asynctest": {
"dir": "vendor/asynctest",
"url": "https://github.com/markspanbroek/asynctest",
"commit": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7"
},
"nim-stint": {
"dir": "vendor/nim-stint",
"url": "https://github.com/status-im/nim-stint.git",
"commit": "036c71d06a6b22f8f967ba9d54afd2189c3872ca"
},
"nim-metrics": {
"dir": "vendor/nim-metrics",
"url": "https://github.com/status-im/nim-metrics.git",
"commit": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5"
},
"nim-libp2p": {
"dir": "vendor/nim-libp2p",
"url": "https://github.com/status-im/nim-libp2p.git",
"commit": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702"
},
"nim-chronicles": {
"dir": "vendor/nim-chronicles",
"url": "https://github.com/status-im/nim-chronicles.git",
"commit": "7631f7b2ee03398cb1512a79923264e8f9410af6"
},
"nim-protobuf-serialization": {
"dir": "vendor/nim-protobuf-serialization",
"url": "https://github.com/status-im/nim-protobuf-serialization",
"commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6"
}
},
"nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n",
"nimVersion": "1.6.14",
"gccVersion": "",
"clangVersion": ""
}

View File

@ -1,22 +0,0 @@
coverage:
status:
project:
default:
# advanced settings
# Prevents PR from being blocked with a reduction in coverage.
# Note, if we want to re-enable this, a `threshold` value can be used
# allow coverage to drop by x% while still posting a success status.
# `informational`: https://docs.codecov.com/docs/commit-status#informational
# `threshold`: https://docs.codecov.com/docs/commit-status#threshold
informational: true
patch:
default:
# advanced settings
# Prevents PR from being blocked with a reduction in coverage.
# Note, if we want to re-enable this, a `threshold` value can be used
# allow coverage to drop by x% while still posting a success status.
# `informational`: https://docs.codecov.com/docs/commit-status#informational
# `threshold`: https://docs.codecov.com/docs/commit-status#threshold
informational: true

View File

@ -1,5 +1,5 @@
import
./libp2pdht/dht,
./libp2pdht/discv5
./codexdht/dht,
./codexdht/discv5
export dht, discv5

View File

@ -1,65 +1,42 @@
# Package
version = "0.2.1"
version = "0.6.0"
author = "Status Research & Development GmbH"
description = "DHT based on the libp2p Kademlia spec"
description = "DHT based on Eth discv5 implementation"
license = "MIT"
skipDirs = @["tests"]
# Dependencies
requires "nim >= 1.2.0"
requires "secp256k1#b3f38e2795e805743b299dc5d96d332db375b520" # >= 0.5.2 & < 0.6.0
requires "protobuf_serialization#27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a" # >= 0.2.0 & < 0.3.0
requires "nimcrypto == 0.5.4"
requires "bearssl#head"
requires "chronicles >= 0.10.2 & < 0.11.0"
requires "chronos#1394c9e04957928afc1db33d2e0965cfb677a1e0" # >= 3.0.11 & < 3.1.0
requires "libp2p#unstable"
requires "metrics"
requires "stew#head"
requires "stint"
requires "asynctest >= 0.3.1 & < 0.4.0"
requires "https://github.com/status-im/nim-datastore#head"
requires "questionable"
requires "nim >= 2.2.4 & < 3.0.0"
requires "secp256k1 >= 0.6.0 & < 0.7.0"
requires "nimcrypto >= 0.6.2 & < 0.8.0"
requires "bearssl >= 0.2.5 & < 0.3.0"
requires "chronicles >= 0.11.2 & < 0.13.0"
requires "chronos >= 4.0.4 & < 4.1.0"
requires "libp2p >= 1.14.1 & < 2.0.0"
requires "metrics >= 0.1.0 & < 0.2.0"
requires "stew >= 0.4.2"
requires "stint >= 0.8.1 & < 0.9.0"
requires "https://github.com/logos-storage/nim-datastore >= 0.2.1 & < 0.3.0"
requires "questionable >= 0.10.15 & < 0.11.0"
requires "leveldbstatic >= 0.2.1 & < 0.3.0"
task testAll, "Run DHT tests":
exec "nim c -r tests/testAll.nim"
task testAll, "Run all test suites":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testAll"
# task coverage, "generates code coverage report":
# var (output, exitCode) = gorgeEx("which lcov")
# if exitCode != 0:
# echo ""
# echo " ************************** ⛔️ ERROR ⛔️ **************************"
# echo " ** **"
# echo " ** ERROR: lcov not found, it must be installed to run code **"
# echo " ** coverage locally **"
# echo " ** **"
# echo " *****************************************************************"
# echo ""
# quit 1
task test, "Run the test suite":
exec "nimble install -d -y"
withDir "tests":
exec "nimble test"
# (output, exitCode) = gorgeEx("gcov --version")
# if output.contains("Apple LLVM"):
# echo ""
# echo " ************************* ⚠️ WARNING ⚠️ *************************"
# echo " ** **"
# echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
# echo " ** emulates an old version of gcov (4.2.0) and therefore **"
# echo " ** coverage results will differ than those on CI (which **"
# echo " ** uses a much newer version of gcov). **"
# echo " ** **"
# echo " *****************************************************************"
# echo ""
# exec("nimble --verbose test --opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage")
# exec("cd nimcache; rm *.c; cd ..")
# mkDir("coverage")
# exec("lcov --capture --directory nimcache --output-file coverage/coverage.info")
# exec("$(which bash) -c 'shopt -s globstar; ls $(pwd)/libp2pdht/{*,**/*}.nim'")
# exec("$(which bash) -c 'shopt -s globstar; lcov --extract coverage/coverage.info $(pwd)/libp2pdht/{*,**/*}.nim --output-file coverage/coverage.f.info'")
# echo "Generating HTML coverage report"
# exec("genhtml coverage/coverage.f.info --output-directory coverage/report")
# echo "Opening HTML coverage report in browser..."
# exec("open coverage/report/index.html")
task testPart1, "Run the test suite part 1":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testPart1"
task testPart2, "Run the test suite part 2":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testPart2"

View File

@ -0,0 +1,104 @@
import
std/sugar,
libp2p/crypto/[crypto, secp],
stew/[byteutils, objects, ptrops],
results
import secp256k1
const
KeyLength* = secp256k1.SkEcdhSecretSize
## Ecdh shared secret key length without leading byte
## (publicKey * privateKey).x, where length of x is 32 bytes
FullKeyLength* = KeyLength + 1
## Ecdh shared secret with leading byte 0x02 or 0x03
type
SharedSecret* = object
## Representation of ECDH shared secret, without leading `y` byte
data*: array[KeyLength, byte]
SharedSecretFull* = object
## Representation of ECDH shared secret, with leading `y` byte
## (`y` is 0x02 when (publicKey * privateKey).y is even or 0x03 when odd)
data*: array[FullKeyLength, byte]
proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
let skKey = ? secp.SkPrivateKey.init(data).mapErr(e =>
("Failed to init private key from hex string: " & $e).cstring)
ok PrivateKey.init(skKey)
proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
let skKey = ? secp.SkPublicKey.init(data).mapErr(e =>
("Failed to init public key from hex string: " & $e).cstring)
ok PublicKey.init(skKey)
proc ecdhSharedSecretHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
{.cdecl, raises: [].} =
## Hash function used by `ecdhSharedSecret` below
##
## `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
## Both `x32` and `y32` are 32 bytes length.
##
## Take the `x32` part as ecdh shared secret.
## output length is derived from x32 length and taken from ecdh
## generic parameter `KeyLength`
copyMem(output, x32, KeyLength)
return 1
func ecdhSharedSecret(seckey: SkPrivateKey, pubkey: secp.SkPublicKey): SharedSecret =
## Compute ecdh agreed shared secret.
let res = secp256k1.ecdh[KeyLength](
secp256k1.SkSecretKey(seckey),
secp256k1.SkPublicKey(pubkey),
ecdhSharedSecretHash,
nil,
)
# This function only fail if the hash function return zero.
# Because our hash function always success, we can turn the error into defect
doAssert res.isOk, $res.error
SharedSecret(data: res.get)
proc toRaw*(pubkey: PublicKey): seq[byte] =
secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]
proc ecdhSharedSecretFullHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
{.cdecl, raises: [].} =
## Hash function used by `ecdhSharedSecretFull` below
# `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
# Leading byte is 0x02 if `y32` is even and 0x03 if odd. Then concat with `x32`.
# output length is derived from `x32` length + 1 and taken from ecdh
# generic parameter `FullKeyLength`
# output[0] = 0x02 | (y32[31] & 1)
output[] = 0x02 or (y32.offset(31)[] and 0x01)
copyMem(output.offset(1), x32, KeyLength)
return 1
func ecdhSharedSecretFull*(seckey: PrivateKey, pubkey: PublicKey): SharedSecretFull =
## Compute ecdh agreed shared secret with leading byte.
##
let res = ecdh[FullKeyLength](secp256k1.SkSecretKey(seckey.skkey),
secp256k1.SkPublicKey(pubkey.skkey),
ecdhSharedSecretFullHash, nil)
# This function only fail if the hash function return zero.
# Because our hash function always success, we can turn the error into defect
doAssert res.isOk, $res.error
SharedSecretFull(data: res.get)
proc ecdhRaw*(
priv: PrivateKey,
pub: PublicKey
): Result[SharedSecretFull, cstring] =
## emulate old ecdhRaw style keys
##
## this includes a leading 0x02 or 0x03
##
# TODO: Do we need to support non-secp256k1 schemes?
if priv.scheme != Secp256k1 or pub.scheme != Secp256k1:
return err "Must use secp256k1 scheme".cstring
ok ecdhSharedSecretFull(priv, pub)

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -11,19 +11,21 @@
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#sessions
##
{.push raises: [Defect].}
{.push raises: [].}
import
std/[hashes, net, options, sugar, tables],
stew/endians2,
bearssl/rand,
chronicles,
stew/[results, byteutils],
stew/[byteutils],
stint,
libp2p/crypto/crypto as libp2p_crypto,
libp2p/crypto/secp,
libp2p/signed_envelope,
metrics,
nimcrypto,
results,
"."/[messages, messages_encoding, node, spr, hkdf, sessions],
"."/crypto
@ -32,13 +34,16 @@ from stew/objects import checkedEnumAssign
export crypto
declareCounter discovery_session_lru_cache_hits, "Session LRU cache hits"
declareCounter discovery_session_lru_cache_misses, "Session LRU cache misses"
declareCounter discovery_session_decrypt_failures, "Session decrypt failures"
declareCounter dht_session_lru_cache_hits, "Session LRU cache hits"
declareCounter dht_session_lru_cache_misses, "Session LRU cache misses"
declareCounter dht_session_decrypt_failures, "Session decrypt failures"
logScope:
topics = "discv5"
type
cipher = aes128
const
version: uint16 = 1
idSignatureText = "discovery v5 identity proof"
@ -161,7 +166,7 @@ proc deriveKeys*(n1, n2: NodeId, priv: PrivateKey, pub: PublicKey,
ok secrets
proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] =
var ectx: GCM[aes128]
var ectx: GCM[cipher]
ectx.init(key, nonce, authData)
result = newSeq[byte](pt.len + gcmTagSize)
ectx.encrypt(pt, result)
@ -174,7 +179,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
debug "cipher is missing tag", len = ct.len
return
var dctx: GCM[aes128]
var dctx: GCM[cipher]
dctx.init(key, nonce, authData)
var res = newSeq[byte](ct.len - gcmTagSize)
var tag: array[gcmTagSize, byte]
@ -188,7 +193,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
return some(res)
proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] =
var ectx: CTR[aes128]
var ectx: CTR[cipher]
ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv)
result = newSeq[byte](header.len)
ectx.encrypt(header, result)
@ -200,7 +205,7 @@ proc hasHandshake*(c: Codec, key: HandshakeKey): bool =
proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
seq[byte] =
result.add(protocolId)
result.add(version.toBytesBE())
result.add(endians2.toBytesBE(version))
result.add(byte(flag))
result.add(nonce)
# TODO: assert on authSize of > 2^16?
@ -208,8 +213,9 @@ proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
toId: NodeId, toAddr: Address, message: openArray[byte]):
(seq[byte], AESGCMNonce) =
(seq[byte], AESGCMNonce, bool) =
var nonce: AESGCMNonce
var haskey: bool
hmacDrbgGenerate(rng, nonce) # Random AESGCM nonce
var iv: array[ivSize, byte]
hmacDrbgGenerate(rng, iv) # Random IV
@ -225,10 +231,11 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
# message
var messageEncrypted: seq[byte]
var initiatorKey, recipientKey: AesKey
if c.sessions.load(toId, toAddr, recipientKey, initiatorKey):
var initiatorKey, recipientKey1, recipientKey2: AesKey
if c.sessions.load(toId, toAddr, recipientKey1, recipientKey2, initiatorKey):
haskey = true
messageEncrypted = encryptGCM(initiatorKey, nonce, message, @iv & header)
discovery_session_lru_cache_hits.inc()
dht_session_lru_cache_hits.inc()
else:
# We might not have the node's keys if the handshake hasn't been performed
# yet. That's fine, we send a random-packet and we will be responded with
@ -237,10 +244,11 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
# message. 16 bytes for the gcm tag and 4 bytes for ping with requestId of
# 1 byte (e.g "01c20101"). Could increase to 27 for 8 bytes requestId in
# case this must not look like a random packet.
haskey = false
var randomData: array[gcmTagSize + 4, byte]
hmacDrbgGenerate(rng, randomData)
messageEncrypted.add(randomData)
discovery_session_lru_cache_misses.inc()
dht_session_lru_cache_misses.inc()
let maskedHeader = encryptHeader(toId, iv, header)
@ -249,7 +257,7 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
packet.add(maskedHeader)
packet.add(messageEncrypted)
return (packet, nonce)
return (packet, nonce, haskey)
proc encodeWhoareyouPacket*(rng: var HmacDrbgContext, c: var Codec,
toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
@ -307,7 +315,7 @@ proc encodeHandshakePacket*(rng: var HmacDrbgContext, c: var Codec,
authdataHead.add(c.localNode.id.toByteArrayBE())
let ephKeys = ? KeyPair.random(rng)
let ephKeys = ? KeyPair.random(PKScheme.Secp256k1, rng)
.mapErr((e: CryptoError) =>
("Failed to create random key pair: " & $e).cstring)
@ -370,7 +378,7 @@ proc decodeHeader*(id: NodeId, iv, maskedHeader: openArray[byte]):
DecodeResult[(StaticHeader, seq[byte])] =
# No need to check staticHeader size as that is included in minimum packet
# size check in decodePacket
var ectx: CTR[aes128]
var ectx: CTR[cipher]
ectx.init(id.toByteArrayBE().toOpenArray(0, aesKeySize - 1), iv)
# Decrypt static-header part of the header
var staticHeader = newSeq[byte](staticHeaderSize)
@ -419,26 +427,35 @@ proc decodeMessagePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
let srcId = NodeId.fromBytesBE(header.toOpenArray(staticHeaderSize,
header.high))
var initiatorKey, recipientKey: AesKey
if not c.sessions.load(srcId, fromAddr, recipientKey, initiatorKey):
var initiatorKey, recipientKey1, recipientKey2: AesKey
if not c.sessions.load(srcId, fromAddr, recipientKey1, recipientKey2, initiatorKey):
# Don't consider this an error, simply haven't done a handshake yet or
# the session got removed.
trace "Decrypting failed (no keys)"
discovery_session_lru_cache_misses.inc()
dht_session_lru_cache_misses.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
discovery_session_lru_cache_hits.inc()
dht_session_lru_cache_hits.inc()
let pt = decryptGCM(recipientKey, nonce, ct, @iv & @header)
var pt = decryptGCM(recipientKey2, nonce, ct, @iv & @header)
if pt.isNone():
# Don't consider this an error, the session got probably removed at the
# peer's side and a random message is send.
trace "Decrypting failed (invalid keys)"
c.sessions.del(srcId, fromAddr)
discovery_session_decrypt_failures.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
trace "Decrypting failed, trying other key"
pt = decryptGCM(recipientKey1, nonce, ct, @iv & @header)
if pt.isNone():
# Don't consider this an error, the session got probably removed at the
# peer's side and a random message is send.
# This might also be a cross-connect. Not deleting key, as it might be
# needed later, depending on message order.
trace "Decrypting failed (invalid keys)", address = fromAddr
#c.sessions.del(srcId, fromAddr)
dht_session_decrypt_failures.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
# Most probably the same decryption key will work next time. We should
# elevate it's priority.
c.sessions.swapr(srcId, fromAddr)
let message = ? decodeMessage(pt.get())

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -15,7 +15,7 @@
## To select the right address, a majority count is done. This is done over a
## sort of moving window as votes expire after `IpVoteTimeout`.
{.push raises: [Defect].}
{.push raises: [].}
import
std/[tables, options],

View File

@ -1,6 +1,6 @@
import std/[tables, lists, options]
{.push raises: [Defect].}
{.push raises: [].}
export tables, lists, options
@ -55,3 +55,10 @@ iterator items*[K, V](lru: LRUCache[K, V]): V =
for item in lru.list:
yield item[1]
iterator keys*[K, V](lru: LRUCache[K, V]): K =
## Get cached keys - this doesn't touch the cache
##
for item in lru.table.keys:
yield item

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -10,7 +10,7 @@
## These messages get protobuf encoded, while in the spec they get RLP encoded.
##
{.push raises: [Defect].}
{.push raises: [].}
import
std/[hashes, net],

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2020-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -11,8 +11,10 @@
import
std/net,
chronicles,
stew/endians2,
libp2p/routing_record,
libp2p/signed_envelope,
libp2p/protobuf/minprotobuf,
"."/[messages, spr, node],
../../../../dht/providers_encoding
@ -98,7 +100,7 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
family = uint8.fromBytesBE(buffer).IpAddressFamily
family = endians2.fromBytesBE(uint8, buffer).IpAddressFamily
ok(true)
proc write*(pb: var ProtoBuffer, field: int, family: IpAddressFamily) =
@ -324,7 +326,7 @@ proc encodeMessage*[T: SomeMessage](p: T, reqId: RequestId): seq[byte] =
pb.write(2, encoded)
pb.finish()
result.add(pb.buffer)
trace "Encoded protobuf message", typ = $T, encoded
trace "Encoded protobuf message", typ = $T
proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] =
## Decodes to the specific `Message` type.

View File

@ -1,40 +1,51 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
import
std/hashes,
std/[hashes, net],
bearssl/rand,
chronicles,
chronos,
nimcrypto,
stew/shims/net,
stint,
./crypto,
./spr
export stint
const
avgSmoothingFactor = 0.9
seenSmoothingFactor = 0.9
type
NodeId* = UInt256
Address* = object
ip*: ValidIpAddress
ip*: IpAddress
port*: Port
Stats* = object
rttMin*: float #millisec
rttAvg*: float #millisec
bwAvg*: float #bps
bwMax*: float #bps
Node* = ref object
id*: NodeId
pubkey*: PublicKey
address*: Option[Address]
record*: SignedPeerRecord
seen*: bool ## Indicates if there was at least one successful
seen*: float ## Indicates if there was at least one successful
## request-response with this node, or if the node was verified
## through the underlying transport mechanisms.
## through the underlying transport mechanisms. After first contact
## it tracks how reliable communication with the node is.
stats*: Stats # traffic measurements and statistics
func toNodeId*(pid: PeerId): NodeId =
## Convert a peer id to a node identifier.
@ -57,7 +68,7 @@ func newNode*(
id: ? pk.toNodeId(),
pubkey: pk,
record: record,
address: Address(ip: ValidIpAddress.init(ip), port: port).some)
address: Address(ip: ip, port: port).some)
ok node
@ -77,7 +88,9 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
nodeId = ? pk.get().toNodeId()
if r.ip.isSome() and r.udp.isSome():
let a = Address(ip: ipv4(r.ip.get()), port: Port(r.udp.get()))
let a = Address(
ip: IpAddress(family: IPv4, address_v4: r.ip.get()), port: Port(r.udp.get())
)
ok(Node(
id: nodeId,
@ -91,7 +104,7 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
record: r,
address: none(Address)))
proc update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress],
proc update*(n: Node, pk: PrivateKey, ip: Option[IpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()): Result[void, cstring] =
? n.record.update(pk, ip, tcpPort, udpPort)
@ -135,14 +148,14 @@ func shortLog*(id: NodeId): string =
result = sid
else:
result = newStringOfCap(10)
for i in 0..<2:
for i in 0..<3:
result.add(sid[i])
result.add("*")
for i in (len(sid) - 6)..sid.high:
result.add(sid[i])
chronicles.formatIt(NodeId): shortLog(it)
func hash*(ip: ValidIpAddress): Hash =
func hash*(ip: IpAddress): Hash =
case ip.family
of IpAddressFamily.IPv6: hash(ip.address_v6)
of IpAddressFamily.IPv4: hash(ip.address_v4)
@ -177,3 +190,38 @@ func shortLog*(nodes: seq[Node]): string =
result.add("]")
chronicles.formatIt(seq[Node]): shortLog(it)
func shortLog*(address: Address): string =
$address
chronicles.formatIt(Address): shortLog(it)
func registerSeen*(n: Node, seen = true) =
## Register an event of seeing (getting a message from) or not seeing (missing a message from) the node
## Note: interpretation might depend on NAT type
if n.seen == 0: # first time seeing the node
n.seen = 1
else:
n.seen = seenSmoothingFactor * n.seen + (1.0 - seenSmoothingFactor) * seen.float
func alreadySeen*(n: Node): bool =
## Was the node seen at least once?
n.seen > 0
# collecting performance metrics
func registerRtt*(n: Node, rtt: Duration) =
## register an RTT measurement
let rttMs = rtt.nanoseconds.float / 1e6
n.stats.rttMin =
if n.stats.rttMin == 0: rttMs
else: min(n.stats.rttMin, rttMs)
n.stats.rttAvg =
if n.stats.rttAvg == 0: rttMs
else: avgSmoothingFactor * n.stats.rttAvg + (1.0 - avgSmoothingFactor) * rttMs
func registerBw*(n: Node, bw: float) =
## register a bandwidth measurement
n.stats.bwMax = max(n.stats.bwMax, bw)
n.stats.bwAvg =
if n.stats.bwAvg == 0: bw
else: avgSmoothingFactor * n.stats.bwAvg + (1.0 - avgSmoothingFactor) * bw
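The three register* helpers above share one exponential-moving-average pattern. The following minimal, self-contained sketch (illustrative only; the constant mirrors seenSmoothingFactor above and the 0.5 figure refers to NoreplyRemoveThreshold in protocol.nim) traces how the seen score decays under consecutive missed replies:

import std/strformat

const alpha = 0.9                      # mirrors seenSmoothingFactor above

proc smoothSeen(prev: float, seen: bool): float =
  ## Same update rule as registerSeen: first contact pins the score to 1.
  if prev == 0: 1.0
  else: alpha * prev + (1.0 - alpha) * seen.float

when isMainModule:
  var score = smoothSeen(0.0, true)    # 1.0 after the first reply
  for miss in 1 .. 7:                  # consecutive missed replies
    score = smoothSeen(score, false)
    echo &"after {miss} misses: {score:.3f}"
  # prints 0.900, 0.810, ... 0.478 - only the 7th miss falls below the
  # NoreplyRemoveThreshold of 0.5 used in protocol.nim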

View File

@ -1,8 +1,8 @@
{.push raises: [Defect].}
{.push raises: [].}
import
std/[sets, options],
stew/results, stew/shims/net, chronicles, chronos,
std/[net, sets, options],
results, chronicles, chronos,
"."/[node, spr, routing_table]
logScope:

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -71,18 +71,18 @@
## more requests will be needed for a lookup (adding bandwidth and latency).
## This might be a concern for mobile devices.
{.push raises: [Defect].}
{.push raises: [].}
import
std/[tables, sets, options, math, sequtils, algorithm, strutils],
stew/shims/net as stewNet,
std/[net, tables, sets, options, math, sequtils, algorithm, strutils],
json_serialization/std/net,
stew/[base64, endians2, results],
stew/[base64, endians2],
pkg/[chronicles, chronicles/chronos_tools],
pkg/chronos,
pkg/stint,
pkg/bearssl/rand,
pkg/metrics
pkg/metrics,
pkg/results
import "."/[
messages,
@ -100,13 +100,13 @@ import nimcrypto except toHex
export options, results, node, spr, providers
declareCounter discovery_message_requests_outgoing,
declareCounter dht_message_requests_outgoing,
"Discovery protocol outgoing message requests", labels = ["response"]
declareCounter discovery_message_requests_incoming,
declareCounter dht_message_requests_incoming,
"Discovery protocol incoming message requests", labels = ["response"]
declareCounter discovery_unsolicited_messages,
declareCounter dht_unsolicited_messages,
"Discovery protocol unsolicited or timed-out messages"
declareCounter discovery_enr_auto_update,
declareCounter dht_enr_auto_update,
"Amount of discovery IP:port address SPR auto updates"
logScope:
@ -117,6 +117,7 @@ const
LookupRequestLimit = 3 ## Amount of distances requested in a single Findnode
## message for a lookup or query
FindNodeResultLimit = 16 ## Maximum amount of SPRs in the total Nodes messages
FindNodeFastResultLimit = 6 ## Maximum amount of SPRs in response to findNodeFast
## that will be processed
MaxNodesPerMessage = 3 ## Maximum amount of SPRs per individual Nodes message
RefreshInterval = 5.minutes ## Interval of launching a random query to
@ -125,12 +126,17 @@ const
RevalidateMax = 10000 ## Revalidation of a peer is done between min and max milliseconds.
## value in milliseconds
IpMajorityInterval = 5.minutes ## Interval for checking the latest IP:Port
DebugPrintInterval = 5.minutes ## Interval to print neighborhood with stats
## majority and updating this when SPR auto update is set.
InitialLookups = 1 ## Amount of lookups done when populating the routing table
ResponseTimeout* = 4.seconds ## timeout for the response of a request-response
ResponseTimeout* = 1.seconds ## timeout for the response of a request-response
MaxProvidersEntries* = 1_000_000 # one million records
MaxProvidersPerEntry* = 20 # providers per entry
## call
FindnodeSeenThreshold = 1.0 ## threshold used as findnode response filter
LookupSeenThreshold = 0.0 ## threshold used for lookup nodeset selection
QuerySeenThreshold = 0.0 ## threshold used for query nodeset selection
NoreplyRemoveThreshold = 0.5 ## remove node on no reply if 'seen' is below this value
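# Illustrative reading of these thresholds, assuming seenSmoothingFactor = 0.9
# from node.nim: a node starts at seen = 1.0 on first contact; one missed reply
# drops it to 0.9, which still passes the 0.0 lookup/query thresholds and stays
# above NoreplyRemoveThreshold, while FindnodeSeenThreshold = 1.0 limits findnode
# answers to nodes currently at the maximum score. Roughly seven consecutive
# misses (0.9^7 ~ 0.48) are needed before a missed reply can force removal.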
func shortLog*(record: SignedPeerRecord): string =
## Returns compact string representation of ``SignedPeerRecord``.
@ -166,6 +172,7 @@ type
refreshLoop: Future[void]
revalidateLoop: Future[void]
ipMajorityLoop: Future[void]
debugPrintLoop: Future[void]
lastLookup: chronos.Moment
bootstrapRecords*: seq[SignedPeerRecord]
ipVote: IpVote
@ -182,6 +189,9 @@ type
DiscResult*[T] = Result[T, cstring]
func `$`*(p: Protocol): string =
$p.localNode.id
const
defaultDiscoveryConfig* = DiscoveryConfig(
tableIpLimits: DefaultTableIpLimits,
@ -231,7 +241,7 @@ proc randomNodes*(d: Protocol, maxAmount: int): seq[Node] =
d.routingTable.randomNodes(maxAmount)
proc randomNodes*(d: Protocol, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect.}): seq[Node] =
pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].}): seq[Node] =
## Get a `maxAmount` of random nodes from the local routing table with the
## `pred` predicate function applied as filter on the nodes selected.
d.routingTable.randomNodes(maxAmount, pred)
@ -243,14 +253,14 @@ proc randomNodes*(d: Protocol, maxAmount: int,
d.randomNodes(maxAmount, proc(x: Node): bool = x.record.contains(enrField))
proc neighbours*(d: Protocol, id: NodeId, k: int = BUCKET_SIZE,
seenOnly = false): seq[Node] =
seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours (closest node ids) of the given node id.
d.routingTable.neighbours(id, k, seenOnly)
d.routingTable.neighbours(id, k, seenThreshold)
proc neighboursAtDistances*(d: Protocol, distances: seq[uint16],
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours (closest node ids) at given distances.
d.routingTable.neighboursAtDistances(distances, k, seenOnly)
d.routingTable.neighboursAtDistances(distances, k, seenThreshold)
proc nodesDiscovered*(d: Protocol): int = d.routingTable.len
@ -272,7 +282,7 @@ proc updateRecord*(
newSpr = spr.get()
seqNo = d.localNode.record.seqNum
info "Updated discovery SPR", uri = newSpr.toURI()
info "Updated discovery SPR", uri = newSpr.toURI(), newSpr = newSpr.data
d.localNode.record = newSpr
d.localNode.record.data.seqNo = seqNo
@ -338,7 +348,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
# TODO: Still deduplicate also?
if fn.distances.all(proc (x: uint16): bool = return x <= 256):
d.sendNodes(fromId, fromAddr, reqId,
d.routingTable.neighboursAtDistances(fn.distances, seenOnly = true))
d.routingTable.neighboursAtDistances(fn.distances, FindNodeResultLimit, FindnodeSeenThreshold))
else:
# At least one invalid distance, but polite node that we are, we still respond
# with empty nodes.
@ -347,7 +357,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
proc handleFindNodeFast(d: Protocol, fromId: NodeId, fromAddr: Address,
fnf: FindNodeFastMessage, reqId: RequestId) =
d.sendNodes(fromId, fromAddr, reqId,
d.routingTable.neighbours(fnf.target, seenOnly = true))
d.routingTable.neighbours(fnf.target, FindNodeFastResultLimit, FindnodeSeenThreshold))
# TODO: if known, maybe we should add exact target even if not yet "seen"
proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
@ -369,7 +379,7 @@ proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
proc addProviderLocal(p: Protocol, cId: NodeId, prov: SignedPeerRecord) {.async.} =
trace "adding provider to local db", n = p.localNode, cId, prov
if (let res = (await p.providers.add(cid, prov)); res.isErr):
if (let res = (await p.providers.add(cId, prov)); res.isErr):
trace "Unable to add provider", cid, peerId = prov.data.peerId
proc handleAddProvider(
@ -403,27 +413,27 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
message: Message) =
case message.kind
of ping:
discovery_message_requests_incoming.inc()
dht_message_requests_incoming.inc()
d.handlePing(srcId, fromAddr, message.ping, message.reqId)
of findNode:
discovery_message_requests_incoming.inc()
dht_message_requests_incoming.inc()
d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId)
of findNodeFast:
discovery_message_requests_incoming.inc()
dht_message_requests_incoming.inc()
d.handleFindNodeFast(srcId, fromAddr, message.findNodeFast, message.reqId)
of talkReq:
discovery_message_requests_incoming.inc()
dht_message_requests_incoming.inc()
d.handleTalkReq(srcId, fromAddr, message.talkReq, message.reqId)
of addProvider:
discovery_message_requests_incoming.inc()
discovery_message_requests_incoming.inc(labelValues = ["no_response"])
dht_message_requests_incoming.inc()
dht_message_requests_incoming.inc(labelValues = ["no_response"])
d.handleAddProvider(srcId, fromAddr, message.addProvider, message.reqId)
of getProviders:
discovery_message_requests_incoming.inc()
dht_message_requests_incoming.inc()
asyncSpawn d.handleGetProviders(srcId, fromAddr, message.getProviders, message.reqId)
of regTopic, topicQuery:
discovery_message_requests_incoming.inc()
discovery_message_requests_incoming.inc(labelValues = ["no_response"])
dht_message_requests_incoming.inc()
dht_message_requests_incoming.inc(labelValues = ["no_response"])
trace "Received unimplemented message kind", kind = message.kind,
origin = fromAddr
else:
@ -431,7 +441,7 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
if d.awaitedMessages.take((srcId, message.reqId), waiter):
waiter.complete(some(message))
else:
discovery_unsolicited_messages.inc()
dht_unsolicited_messages.inc()
trace "Timed out or unrequested message", kind = message.kind,
origin = fromAddr
@ -443,27 +453,50 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],
else:
ok()
proc replaceNode(d: Protocol, n: Node) =
proc replaceNode(d: Protocol, n: Node, forceRemoveBelow = 1.0) =
if n.record notin d.bootstrapRecords:
d.routingTable.replaceNode(n)
d.routingTable.replaceNode(n, forceRemoveBelow)
else:
# For now we never remove bootstrap nodes. It might make sense to actually
# do so and to retry them only in case we drop to a really low amount of
# peers in the routing table.
debug "Message request to bootstrap node failed", src=d.localNode, dst=n
proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T,
reqId: RequestId) =
doAssert(toNode.address.isSome())
let
message = encodeMessage(m, reqId)
proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId):
trace "Send message packet", dstId = toNode.id,
address = toNode.address, kind = messageKind(T)
dht_message_requests_outgoing.inc()
d.transport.sendMessage(toNode, message)
proc waitResponse*[T: SomeMessage](d: Protocol, node: Node, msg: T):
Future[Option[Message]] =
let reqId = RequestId.init(d.rng[])
result = d.waitMessage(node, reqId)
sendRequest(d, node, msg, reqId)
proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId, timeout = ResponseTimeout):
Future[Option[Message]] =
result = newFuture[Option[Message]]("waitMessage")
let res = result
let key = (fromNode.id, reqId)
sleepAsync(ResponseTimeout).addCallback() do(data: pointer):
sleepAsync(timeout).addCallback() do(data: pointer):
d.awaitedMessages.del(key)
if not res.finished:
res.complete(none(Message))
d.awaitedMessages[key] = result
proc waitNodeResponses*[T: SomeMessage](d: Protocol, node: Node, msg: T):
Future[DiscResult[seq[SignedPeerRecord]]] =
let reqId = RequestId.init(d.rng[])
result = d.waitNodes(node, reqId)
sendRequest(d, node, msg, reqId)
proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
Future[DiscResult[seq[SignedPeerRecord]]] {.async.} =
## Wait for one or more nodes replies.
@ -472,72 +505,70 @@ proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
## on that, more replies will be awaited.
## If one reply is lost here (timed out), others are ignored too.
## The same applies to out-of-order reception.
let startTime = Moment.now()
var op = await d.waitMessage(fromNode, reqId)
if op.isSome:
if op.get.kind == MessageKind.nodes:
var res = op.get.nodes.sprs
let total = op.get.nodes.total
let
total = op.get.nodes.total
firstTime = Moment.now()
rtt = firstTime - startTime
# trace "nodes RTT:", rtt, node = fromNode
fromNode.registerRtt(rtt)
for i in 1 ..< total:
op = await d.waitMessage(fromNode, reqId)
if op.isSome and op.get.kind == MessageKind.nodes:
res.add(op.get.nodes.sprs)
# Estimate bandwidth from the received UDP packet train, assuming the packets were
# sent back-to-back and spaced in time by the bandwidth bottleneck. This is just a
# rough packet-pair style estimate, far from perfect.
# TODO: get message size from lower layer for better bandwidth estimate
# TODO: get better reception timestamp from lower layers
let
deltaT = Moment.now() - firstTime
bwBps = 500.0 * 8.0 / (deltaT.nanoseconds.float / i.float / 1e9)
# trace "bw estimate:", deltaT = deltaT, i, bw_mbps = bwBps / 1e6, node = fromNode
fromNode.registerBw(bwBps)
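# Worked example of the estimate above (illustrative, using the hardcoded
# 500-byte packet size): if the 4th extra Nodes message (i = 4) arrives 8 ms
# after the first one, deltaT / i = 2 ms per packet, so
# bwBps = 500 * 8 / 0.002 = 2_000_000, i.e. roughly 2 Mbit/s.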
else:
# No error on this as we received some nodes.
break
return ok(res)
else:
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to find node message")
else:
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Nodes message not received in time")
proc sendRequest*[T: SomeMessage](d: Protocol, toId: NodeId, toAddr: Address, m: T):
RequestId =
let
reqId = RequestId.init(d.rng[])
message = encodeMessage(m, reqId)
trace "Send message packet", dstId = toId, toAddr, kind = messageKind(T)
discovery_message_requests_outgoing.inc()
d.transport.sendMessage(toId, toAddr, message)
return reqId
proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T):
RequestId =
doAssert(toNode.address.isSome())
let
reqId = RequestId.init(d.rng[])
message = encodeMessage(m, reqId)
trace "Send message packet", dstId = toNode.id,
address = toNode.address, kind = messageKind(T)
discovery_message_requests_outgoing.inc()
d.transport.sendMessage(toNode, message)
return reqId
proc ping*(d: Protocol, toNode: Node):
Future[DiscResult[PongMessage]] {.async.} =
## Send a discovery ping message.
##
## Returns the received pong message or an error.
let reqId = d.sendRequest(toNode,
PingMessage(sprSeq: d.localNode.record.seqNum))
let resp = await d.waitMessage(toNode, reqId)
let
msg = PingMessage(sprSeq: d.localNode.record.seqNum)
startTime = Moment.now()
resp = await d.waitResponse(toNode, msg)
rtt = Moment.now() - startTime
# trace "ping RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == pong:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().pong)
else:
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to ping message")
else:
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
# A ping (or the pong) was lost; what should we do? The previous implementation called
# d.replaceNode(toNode) immediately, which removed the node. That is too aggressive,
# especially during a temporary network outage: although bootstrap nodes are protected
# from removal, everything else would slowly be removed.
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Pong message not received in time")
proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
@ -546,12 +577,13 @@ proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
##
## Returns the received nodes or an error.
## Received SPRs are already validated and converted to `Node`.
let reqId = d.sendRequest(toNode, FindNodeMessage(distances: distances))
let nodes = await d.waitNodes(toNode, reqId)
let
msg = FindNodeMessage(distances: distances)
nodes = await d.waitNodeResponses(toNode, msg)
d.routingTable.setJustSeen(toNode, nodes.isOk)
if nodes.isOk:
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit, distances)
d.routingTable.setJustSeen(toNode)
return ok(res)
else:
trace "findNode nodes not OK."
@ -564,12 +596,13 @@ proc findNodeFast*(d: Protocol, toNode: Node, target: NodeId):
##
## Returns the received nodes or an error.
## Received SPRs are already validated and converted to `Node`.
let reqId = d.sendRequest(toNode, FindNodeFastMessage(target: target))
let nodes = await d.waitNodes(toNode, reqId)
let
msg = FindNodeFastMessage(target: target)
nodes = await d.waitNodeResponses(toNode, msg)
d.routingTable.setJustSeen(toNode, nodes.isOk)
if nodes.isOk:
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit)
d.routingTable.setJustSeen(toNode)
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeFastResultLimit)
return ok(res)
else:
d.replaceNode(toNode)
@ -581,21 +614,26 @@ proc talkReq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
## Send a discovery talkreq message.
##
## Returns the received talkresp message or an error.
let reqId = d.sendRequest(toNode,
TalkReqMessage(protocol: protocol, request: request))
let resp = await d.waitMessage(toNode, reqId)
let
msg = TalkReqMessage(protocol: protocol, request: request)
startTime = Moment.now()
resp = await d.waitResponse(toNode, msg)
rtt = Moment.now() - startTime
# trace "talk RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == talkResp:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().talkResp.response)
else:
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to talk request message")
else:
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
# remove on loss only if there is a replacement
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Talk response message not received in time")
proc lookupDistances*(target, dest: NodeId): seq[uint16] =
@ -610,25 +648,18 @@ proc lookupDistances*(target, dest: NodeId): seq[uint16] =
result.add(td - uint16(i))
inc i
proc lookupWorker(d: Protocol, destNode: Node, target: NodeId):
proc lookupWorker(d: Protocol, destNode: Node, target: NodeId, fast: bool):
Future[seq[Node]] {.async.} =
let dists = lookupDistances(target, destNode.id)
# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let r = await d.findNode(destNode, dists)
if r.isOk:
result.add(r[])
let r =
if fast:
await d.findNodeFast(destNode, target)
else:
# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let dists = lookupDistances(target, destNode.id)
await d.findNode(destNode, dists)
# Attempt to add all nodes discovered
for n in result:
discard d.addNode(n)
proc lookupWorkerFast(d: Protocol, destNode: Node, target: NodeId):
Future[seq[Node]] {.async.} =
## use terget NodeId based find_node
let r = await d.findNodeFast(destNode, target)
if r.isOk:
result.add(r[])
@ -642,7 +673,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
# `closestNodes` holds the k closest nodes to target found, sorted by distance
# Unvalidated nodes are used for requests as a form of validation.
var closestNodes = d.routingTable.neighbours(target, BUCKET_SIZE,
seenOnly = false)
LookupSeenThreshold)
var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@ -659,10 +690,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
while i < closestNodes.len and pendingQueries.len < Alpha:
let n = closestNodes[i]
if not asked.containsOrIncl(n.id):
if fast:
pendingQueries.add(d.lookupWorkerFast(n, target))
else:
pendingQueries.add(d.lookupWorker(n, target))
pendingQueries.add(d.lookupWorker(n, target, fast))
inc i
trace "discv5 pending queries", total = pendingQueries.len
@ -707,7 +735,8 @@ proc addProvider*(
res.add(d.localNode)
for toNode in res:
if toNode != d.localNode:
discard d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr))
let reqId = RequestId.init(d.rng[])
d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr), reqId)
else:
asyncSpawn d.addProviderLocal(cId, pr)
@ -720,22 +749,21 @@ proc sendGetProviders(d: Protocol, toNode: Node,
trace "sendGetProviders", toNode, msg
let
reqId = d.sendRequest(toNode, msg)
resp = await d.waitMessage(toNode, reqId)
resp = await d.waitResponse(toNode, msg)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == MessageKind.providers:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().provs)
else:
# TODO: do we need to do something when there is an invalid response?
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to GetProviders message")
else:
# TODO: do we need to do something when there is no response?
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
# remove on loss only if there is a replacement
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("GetProviders response message not received in time")
proc getProvidersLocal*(
@ -808,7 +836,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
## This will take k nodes from the routing table closest to target and
## query them for nodes closest to target. If there are less than k nodes in
## the routing table, nodes returned by the first queries will be used.
var queryBuffer = d.routingTable.neighbours(target, k, seenOnly = false)
var queryBuffer = d.routingTable.neighbours(target, k, QuerySeenThreshold)
var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@ -823,7 +851,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
while i < min(queryBuffer.len, k) and pendingQueries.len < Alpha:
let n = queryBuffer[i]
if not asked.containsOrIncl(n.id):
pendingQueries.add(d.lookupWorker(n, target))
pendingQueries.add(d.lookupWorker(n, target, false))
inc i
trace "discv5 pending queries", total = pendingQueries.len
@ -934,7 +962,8 @@ proc revalidateNode*(d: Protocol, n: Node) {.async.} =
discard d.addNode(nodes[][0])
# Get IP and port from pong message and add it to the ip votes
let a = Address(ip: ValidIpAddress.init(res.ip), port: Port(res.port))
trace "pong rx", n, myip = res.ip, myport = res.port
let a = Address(ip: res.ip, port: Port(res.port))
d.ipVote.insert(n.id, a)
proc revalidateLoop(d: Protocol) {.async.} =
@ -1004,7 +1033,7 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
warn "Failed updating SPR with newly discovered external address",
majority, previous, error = res.error
else:
discovery_enr_auto_update.inc()
dht_enr_auto_update.inc()
info "Updated SPR with newly discovered external address",
majority, previous, uri = toURI(d.localNode.record)
else:
@ -1019,6 +1048,19 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
trace "ipMajorityLoop canceled"
trace "ipMajorityLoop exited!"
proc debugPrintLoop(d: Protocol) {.async.} =
## Loop which prints the neighborhood with stats
while true:
await sleepAsync(DebugPrintInterval)
for b in d.routingTable.buckets:
debug "bucket", depth = b.getDepth,
len = b.nodes.len, standby = b.replacementLen
for n in b.nodes:
debug "node", n, rttMin = n.stats.rttMin.int, rttAvg = n.stats.rttAvg.int,
reliability = n.seen.round(3)
# bandwidth estimates are based on limited information, so not logging them yet to avoid confusion
# trace "node", n, bwMaxMbps = (n.stats.bwMax / 1e6).round(3), bwAvgMbps = (n.stats.bwAvg / 1e6).round(3)
func init*(
T: type DiscoveryConfig,
tableIpLimit: uint,
@ -1034,7 +1076,7 @@ func init*(
proc newProtocol*(
privKey: PrivateKey,
enrIp: Option[ValidIpAddress],
enrIp: Option[IpAddress],
enrTcpPort, enrUdpPort: Option[Port],
localEnrFields: openArray[(string, seq[byte])] = [],
bootstrapRecords: openArray[SignedPeerRecord] = [],
@ -1156,6 +1198,7 @@ proc start*(d: Protocol) {.async.} =
d.refreshLoop = refreshLoop(d)
d.revalidateLoop = revalidateLoop(d)
d.ipMajorityLoop = ipMajorityLoop(d)
d.debugPrintLoop = debugPrintLoop(d)
await d.providers.start()

View File

@ -1,16 +1,17 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
import std/sequtils
import pkg/chronicles
import pkg/libp2p
import pkg/questionable
import ../node
import ../lru
@ -35,22 +36,21 @@ type
func add*(
self: var ProvidersCache,
id: NodeId,
provider: SignedPeerRecord) =
record: SignedPeerRecord) =
## Add providers for an id
## to the cache
if self.disable:
return
var providers =
if id notin self.cache:
Providers.init(self.maxProviders.int)
else:
self.cache.get(id).get()
without var providers =? self.cache.get(id):
providers = Providers.init(self.maxProviders.int)
let
peerId = provider.data.peerId
peerId = record.data.peerId
trace "Adding provider to cache", id, peerId
providers.put(peerId, provider)
trace "Adding provider record to cache", id, peerId
providers.put(peerId, record)
self.cache.put(id, providers)
proc get*(
@ -58,14 +58,13 @@ proc get*(
id: NodeId,
start = 0,
stop = MaxProvidersPerEntry.int): seq[SignedPeerRecord] =
## Get providers for an id
## from the cache
if self.disable:
return
if id in self.cache:
let
recs = self.cache.get(id).get
if recs =? self.cache.get(id):
let
providers = toSeq(recs)[start..<min(recs.len, stop)]
@ -74,23 +73,40 @@ proc get*(
func remove*(
self: var ProvidersCache,
id: NodeId,
peerId: PeerId) =
## Remove a provider record from an id
## from the cache
##
if self.disable:
return
if id notin self.cache:
for id in self.cache.keys:
if var providers =? self.cache.get(id):
trace "Removing provider from cache", id, peerId
providers.del(peerId)
self.cache.put(id, providers)
func remove*(
self: var ProvidersCache,
id: NodeId,
peerId: PeerId) =
## Remove a provider record from an id
## from the cache
##
if self.disable:
return
var
providers = self.cache.get(id).get()
trace "Removing provider from cache", id
providers.del(peerId)
self.cache.put(id, providers)
if var providers =? self.cache.get(id):
trace "Removing record from cache", id
providers.del(peerId)
self.cache.put(id, providers)
func drop*(self: var ProvidersCache, id: NodeId) =
## Drop all the providers for an entry
##
if self.disable:
return

View File

@ -1,11 +1,11 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
import std/sequtils
import std/strutils

View File

@ -1,15 +1,17 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
import std/options
import std/sequtils
from std/times import now, utc, toTime, toUnix
import pkg/stew/endians2
import pkg/chronos
import pkg/libp2p
import pkg/datastore
@ -21,16 +23,13 @@ import ./common
const
ExpiredCleanupBatch* = 1000
CleanupInterval* = 5.minutes
CleanupInterval* = 24.hours
proc cleanupExpired*(
store: Datastore,
batchSize = ExpiredCleanupBatch) {.async.} =
trace "Cleaning up expired records"
let
now = Moment.now()
let
q = Query.init(CidKey, limit = batchSize)
@ -47,11 +46,13 @@ proc cleanupExpired*(
var
keys = newSeq[Key]()
let
now = times.now().utc().toTime().toUnix()
for item in iter:
if pair =? (await item) and pair.key.isSome:
if (maybeKey, data) =? (await item) and key =? maybeKey:
let
(key, data) = (pair.key.get(), pair.data)
expired = Moment.init(uint64.fromBytesBE(data).int64, Microsecond)
expired = endians2.fromBytesBE(uint64, data).int64
if now >= expired:
trace "Found expired record", key
@ -74,7 +75,7 @@ proc cleanupOrphaned*(
trace "Cleaning up orphaned records"
let
providersQuery = Query.init(ProvidersKey, limit = batchSize)
providersQuery = Query.init(ProvidersKey, limit = batchSize, value = false)
block:
without iter =? (await store.query(providersQuery)), err:
@ -83,7 +84,7 @@ proc cleanupOrphaned*(
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
trace "Cleaning up orphaned query iterator"
discard (await iter.dispose())
var count = 0
@ -92,10 +93,7 @@ proc cleanupOrphaned*(
trace "Batch cleaned up", size = batchSize
count.inc
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
if (maybeKey, _) =? (await item) and key =? maybeKey:
without peerId =? key.fromProvKey(), err:
trace "Error extracting parts from cid key", key
continue
@ -104,15 +102,17 @@ proc cleanupOrphaned*(
trace "Error building cid key", err = err.msg
continue
without cidIter =? (await store.query(Query.init(cidKey, limit = 1))), err:
trace "Error querying key", cidKey
without cidIter =? (await store.query(Query.init(cidKey, limit = 1, value = false))), err:
trace "Error querying key", cidKey, err = err.msg
continue
let
res = (await allFinished(toSeq(cidIter)))
.filterIt( it.completed )
.mapIt( it.read.get )
.filterIt( it.key.isSome ).len
res = block:
var count = 0
for item in cidIter:
if (key, _) =? (await item) and key.isSome:
count.inc
count
if not isNil(cidIter):
trace "Disposing cid iter"

View File

@ -1,4 +1,4 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -7,17 +7,18 @@
import std/sequtils
import std/strutils
from std/times import now, utc, toTime, toUnix
import pkg/stew/endians2
import pkg/datastore
import pkg/chronos
import pkg/libp2p
import pkg/chronicles
import pkg/stew/results as rs
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results
{.push raises: [Defect].}
{.push raises: [].}
import ./maintenance
import ./cache
@ -56,30 +57,30 @@ proc getProvByKey*(self: ProvidersManager, key: Key): Future[?!SignedPeerRecord]
proc add*(
self: ProvidersManager,
cid: NodeId,
id: NodeId,
provider: SignedPeerRecord,
ttl = ZeroDuration): Future[?!void] {.async.} =
let
peerId = provider.data.peerId
trace "Adding provider to persistent store", cid, peerId
trace "Adding provider to persistent store", id, peerId
without provKey =? makeProviderKey(peerId), err:
trace "Error creating key from provider record", err = err.msg
return failure err.msg
without cidKey =? makeCidKey(cid, peerId), err:
without cidKey =? makeCidKey(id, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg
let
now = times.now().utc().toTime().toUnix()
expires =
if ttl > ZeroDuration:
ttl
ttl.seconds + now
else:
Moment.fromNow(self.ttl) - ZeroMoment
ttl = expires.microseconds.uint64.toBytesBE
self.ttl.seconds + now
ttl = endians2.toBytesBE(expires.uint64)
bytes: seq[byte] =
if existing =? (await self.getProvByKey(provKey)) and
@ -93,17 +94,17 @@ proc add*(
bytes
if bytes.len > 0:
trace "Adding or updating provider record", cid, peerId
trace "Adding or updating provider record", id, peerId
if err =? (await self.store.put(provKey, bytes)).errorOption:
trace "Unable to store provider with key", key = provKey, err = err.msg
trace "Adding or updating cid", cid, key = cidKey, ttl = expires.minutes
trace "Adding or updating id", id, key = cidKey, ttl = expires.seconds
if err =? (await self.store.put(cidKey, @ttl)).errorOption:
trace "Unable to store provider with key", key = cidKey, err = err.msg
return
self.cache.add(cid, provider)
trace "Provider for cid added", cidKey, provKey
self.cache.add(id, provider)
trace "Provider for id added", cidKey, provKey
return success()
proc get*(
@ -136,12 +137,10 @@ proc get*(
trace "Cleaning up query iterator"
discard (await cidIter.dispose())
var keys: seq[Key]
for item in cidIter:
# TODO: =? doesn't support tuples
if pair =? (await item) and pair.key.isSome:
let
(key, val) = (pair.key.get, pair.data)
if (maybeKey, val) =? (await item) and key =? maybeKey:
without pairs =? key.fromCidKey() and
provKey =? makeProviderKey(pairs.peerId), err:
trace "Error creating key from provider record", err = err.msg
@ -150,17 +149,24 @@ proc get*(
trace "Querying provider key", key = provKey
without data =? (await self.store.get(provKey)):
trace "Error getting provider", key = provKey
keys.add(key)
continue
without provider =? SignedPeerRecord.decode(data).mapErr(mapFailure), err:
trace "Unable to decode provider from store", err = err.msg
keys.add(key)
continue
trace "Retrieved provider with key", key = provKey
providers.add(provider)
self.cache.add(id, provider)
trace "Retrieved providers from persistent store", cid = id, len = providers.len
trace "Deleting keys without provider from store", len = keys.len
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting records from persistent store", err = err.msg
return failure err
trace "Retrieved providers from persistent store", id = id, len = providers.len
return success providers
proc contains*(
@ -178,8 +184,8 @@ proc contains*(self: ProvidersManager, peerId: PeerId): Future[bool] {.async.} =
return (await self.store.has(provKey)) |? false
proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $cid), err:
proc contains*(self: ProvidersManager, id: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $id), err:
return false
let
@ -196,15 +202,15 @@ proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
discard (await iter.dispose())
for item in iter:
if pair =? (await item) and pair.key.isSome:
if (key, _) =? (await item) and key.isSome:
return true
return false
proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =
self.cache.drop(cid)
without cidKey =? (CidKey / $cid), err:
self.cache.drop(id)
without cidKey =? (CidKey / $id), err:
return failure(err.msg)
let
@ -224,16 +230,14 @@ proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
keys: seq[Key]
for item in iter:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
if (maybeKey, _) =? (await item) and key =? maybeKey:
keys.add(key)
without pairs =? key.fromCidKey, err:
trace "Unable to parse peer id from key", key
return failure err
self.cache.remove(cid, pairs.peerId)
self.cache.remove(id, pairs.peerId)
trace "Deleted record from store", key
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
@ -242,57 +246,60 @@ proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
return success()
proc remove*(self: ProvidersManager, peerId: PeerId): Future[?!void] {.async.} =
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err
proc remove*(
self: ProvidersManager,
peerId: PeerId,
entries = false): Future[?!void] {.async.} =
let
q = Query.init(cidKey)
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
if entries:
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())
let
q = Query.init(cidKey)
var
keys: seq[Key]
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err
for item in iter:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())
keys.add(key)
var
keys: seq[Key]
let
parts = key.id.split(datastore.Separator)
for item in iter:
if (maybeKey, _) =? (await item) and key =? maybeKey:
keys.add(key)
self.cache.remove(NodeId.fromHex(parts[2]), peerId)
let
parts = key.id.split(datastore.Separator)
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err
trace "Deleted records from store"
trace "Deleted records from store"
without provKey =? makeProviderKey(peerId), err:
without provKey =? peerId.makeProviderKey, err:
return failure err
trace "Removing provider from cache", peerId
self.cache.remove(peerId)
trace "Removing provider record", key = provKey
return (await self.store.delete(provKey))
proc remove*(
self: ProvidersManager,
cid: NodeId,
id: NodeId,
peerId: PeerId): Future[?!void] {.async.} =
self.cache.remove(cid, peerId)
without cidKey =? makeCidKey(cid, peerId), err:
self.cache.remove(id, peerId)
without cidKey =? makeCidKey(id, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg

View File

@ -1,21 +1,26 @@
# codex-dht - Codex DHT
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
{.push raises: [].}
import
std/[algorithm, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos, stew/shims/net as stewNet,
std/[algorithm, net, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos,
"."/[node, random2, spr]
export options
declarePublicGauge routing_table_nodes,
declarePublicGauge dht_routing_table_nodes,
"Discovery routing table nodes", labels = ["state"]
declarePublicGauge dht_routing_table_buckets,
"Discovery routing table: number of buckets"
logScope:
topics = "discv5 routingtable"
type
DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
@ -29,7 +34,7 @@ type
IpLimits* = object
limit*: uint
ips: Table[ValidIpAddress, uint]
ips: Table[IpAddress, uint]
RoutingTable* = object
@ -96,7 +101,7 @@ type
ReplacementExisting
NoAddress
func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val < ipLimits.limit:
ipLimits.ips[ip] = val + 1
@ -104,7 +109,7 @@ func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
else:
false
func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) =
func dec*(ipLimits: var IpLimits, ip: IpAddress) =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val == 1:
ipLimits.ips.del(ip)
@ -177,6 +182,8 @@ proc midpoint(k: KBucket): NodeId =
proc len(k: KBucket): int = k.nodes.len
proc replacementLen*(k: KBucket): int = k.replacementCache.len
proc tail(k: KBucket): Node = k.nodes[high(k.nodes)]
proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool =
@ -205,14 +212,14 @@ proc ipLimitDec(r: var RoutingTable, b: KBucket, n: Node) =
proc add(k: KBucket, n: Node) =
k.nodes.add(n)
routing_table_nodes.inc()
dht_routing_table_nodes.inc()
proc remove(k: KBucket, n: Node): bool =
let i = k.nodes.find(n)
if i != -1:
routing_table_nodes.dec()
if k.nodes[i].seen:
routing_table_nodes.dec(labelValues = ["seen"])
dht_routing_table_nodes.dec()
if alreadySeen(k.nodes[i]):
dht_routing_table_nodes.dec(labelValues = ["seen"])
k.nodes.delete(i)
trace "removed node:", node = n
true
@ -278,11 +285,15 @@ proc computeSharedPrefixBits(nodes: openArray[NodeId]): int =
# Reaching this would mean that all node ids are equal.
doAssert(false, "Unable to calculate number of shared prefix bits")
proc getDepth*(b: KBucket) : int =
computeSharedPrefixBits(@[b.istart, b.iend])
proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop,
ipLimits = DefaultTableIpLimits, rng: ref HmacDrbgContext,
distanceCalculator = XorDistanceCalculator): T =
## Initialize the routing table for provided `Node` and bitsPerHop value.
## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper.
dht_routing_table_buckets.inc()
RoutingTable(
localNode: localNode,
buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)],
@ -296,6 +307,7 @@ proc splitBucket(r: var RoutingTable, index: int) =
let (a, b) = bucket.split()
r.buckets[index] = a
r.buckets.insert(b, index + 1)
dht_routing_table_buckets.inc()
proc bucketForNode(r: RoutingTable, id: NodeId): KBucket =
result = binaryGetBucketForNode(r.buckets, id)
@ -317,15 +329,12 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
# gets moved to the tail.
if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
if not ipLimitInc(r, k, n):
trace "replace: ip limit reached"
return IpLimitReached
ipLimitDec(r, k, k.replacementCache[nodeIdx])
k.replacementCache.delete(nodeIdx)
k.replacementCache.add(n)
trace "replace: already existed"
return ReplacementExisting
elif not ipLimitInc(r, k, n):
trace "replace: ip limit reached (2)"
return IpLimitReached
else:
doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@ -336,7 +345,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
k.replacementCache.delete(0)
k.replacementCache.add(n)
trace "replace: added"
debug "Node added to replacement cache", n
return ReplacementAdded
proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
@ -403,42 +412,50 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
return IpLimitReached
bucket.add(n)
else:
# Bucket must be full, but let's see if the bucket should be split.
debug "Node added to routing table", n
return Added
# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding
else:
# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)
# Bucket must be full, but let's see if the bucket should be split.
# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding
# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)
proc removeNode*(r: var RoutingTable, n: Node) =
## Remove the node `n` from the routing table.
## No replacement added, even if there is one in the replacement cache.
let b = r.bucketForNode(n.id)
if b.remove(n):
ipLimitDec(r, b, n)
proc replaceNode*(r: var RoutingTable, n: Node) =
proc replaceNode*(r: var RoutingTable, n: Node, forceRemoveBelow = 1.0) =
## Replace node `n` with last entry in the replacement cache. If there are
## no entries in the replacement cache, node `n` will simply be removed.
# TODO: Kademlia paper recommends here to not remove nodes if there are no
# replacements. However, that would require a bit more complexity in the
# revalidation as you don't want to try pinging that node all the time.
## no entries in the replacement cache, node `n` will either be removed
## or kept based on `forceRemoveBelow`. Default: remove.
## Note: the Kademlia paper recommends not removing nodes here if there are no
## replacements. This might mean pinging nodes that are not reachable, but it
## also avoids being too aggressive about UDP losses or temporary network
## failures.
let b = r.bucketForNode(n.id)
if b.remove(n):
ipLimitDec(r, b, n)
if (b.replacementCache.len > 0 or n.seen <= forceRemoveBelow):
if b.remove(n):
debug "Node removed from routing table", n
ipLimitDec(r, b, n)
if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
b.add(b.replacementCache[high(b.replacementCache)])
b.replacementCache.delete(high(b.replacementCache))
if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
let rn = b.replacementCache[high(b.replacementCache)]
b.add(rn)
b.replacementCache.delete(high(b.replacementCache))
debug "Node added to routing table from replacement cache", node=rn
proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
## Get the `Node` with `id` as `NodeId` from the routing table.
@ -459,16 +476,16 @@ proc nodesByDistanceTo(r: RoutingTable, k: KBucket, id: NodeId): seq[Node] =
sortedByIt(k.nodes, r.distance(it.id, id))
proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
seenOnly = false): seq[Node] =
seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours of the given node id.
## When seenOnly is set to true, only nodes that have been contacted
## previously successfully will be selected.
## When seenThreshold is set, only nodes whose 'seen' score is at or above the
## threshold (i.e. contacted successfully and reliably enough) will be selected.
result = newSeqOfCap[Node](k * 2)
block addNodes:
for bucket in r.bucketsByDistanceTo(id):
for n in r.nodesByDistanceTo(bucket, id):
# Only provide actively seen nodes when `seenOnly` set.
if not seenOnly or n.seen:
# Avoid nodes with 'seen' value below threshold
if n.seen >= seenThreshold:
result.add(n)
if result.len == k * 2:
break addNodes
@ -480,22 +497,22 @@ proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
result.setLen(k)
proc neighboursAtDistance*(r: RoutingTable, distance: uint16,
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distance.
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenOnly)
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distance.
keepIf(result, proc(n: Node): bool = r.logDistance(n.id, r.localNode.id) == distance)
proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16],
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
## Return up to k neighbours at given logarithmic distances.
# TODO: This currently returns nodes with neighbouring distances, prioritizing the
# first one. It might end up not including all the requested node distances.
# Need to rework the logic here and not use the neighbours call.
if distances.len > 0:
result = r.neighbours(r.idAtDistance(r.localNode.id, distances[0]), k,
seenOnly)
seenThreshold)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distances.
keepIf(result, proc(n: Node): bool =
@ -507,23 +524,30 @@ proc len*(r: RoutingTable): int =
proc moveRight[T](arr: var openArray[T], a, b: int) =
## In `arr` move elements in range [a, b] right by 1.
var t: T
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
when declared(shallowCopy):
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
else:
t = move arr[b + 1]
for i in countdown(b, a):
arr[i + 1] = move arr[i]
arr[a] = move t
proc setJustSeen*(r: RoutingTable, n: Node) =
## Move `n` to the head (most recently seen) of its bucket.
proc setJustSeen*(r: RoutingTable, n: Node, seen = true) =
## If seen, move `n` to the head (most recently seen) of its bucket.
## If `n` is not in the routing table, do nothing.
let b = r.bucketForNode(n.id)
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)
if seen:
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)
if not n.seen:
b.nodes[0].seen = true
routing_table_nodes.inc(labelValues = ["seen"])
if not alreadySeen(n): # first time seeing the node
dht_routing_table_nodes.inc(labelValues = ["seen"])
n.registerSeen(seen)
proc nodeToRevalidate*(r: RoutingTable): Node =
## Return a node to revalidate. The least recently seen node from a random
@ -537,7 +561,7 @@ proc nodeToRevalidate*(r: RoutingTable): Node =
return b.nodes[^1]
proc randomNodes*(r: RoutingTable, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect.} = nil): seq[Node] =
pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].} = nil): seq[Node] =
## Get a `maxAmount` of random nodes from the routing table with the `pred`
## predicate function applied as filter on the nodes selected.
var maxAmount = maxAmount
@ -560,7 +584,8 @@ proc randomNodes*(r: RoutingTable, maxAmount: int,
# while it will take less total time compared to e.g. an (async)
# randomLookup, the time might be wasted as all nodes are possibly seen
# already.
while len(seen) < maxAmount:
# We check against the number of nodes to avoid an infinite loop in case of a filter.
while len(result) < maxAmount and len(seen) < sz:
let bucket = r.rng[].sample(r.buckets)
if bucket.nodes.len != 0:
let node = r.rng[].sample(bucket.nodes)

View File

@ -0,0 +1,88 @@
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
#
## Session cache as mentioned at
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
##
## A session stores encryption and decryption keys for P2P encryption.
## Since a key exchange can be started from either side, and over UDP transport it
## might not get finalised, we can't be sure which encryption key the other side will use:
## - the one derived in the key-exchange started by us,
## - the one derived in the key-exchange started by the other node.
## To alleviate this issue, we store two decryption keys in each session.
{.push raises: [].}
import
std/[net, options],
stint, stew/endians2,
node, lru
export lru
const
aesKeySize* = 128 div 8
keySize = sizeof(NodeId) +
16 + # max size of ip address (ipv6)
2 # Sizeof port
type
AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte]
SessionValue* = array[3 * sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue]
func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0
result[pos ..< pos+sizeof(id)] = toBytesBE(id)
pos.inc(sizeof(id))
case address.ip.family
of IpAddressFamily.IpV4:
result[pos ..< pos+sizeof(address.ip.address_v4)] = address.ip.address_v4
of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytesBE(address.port.uint16)
func swapr*(s: var Sessions, id: NodeId, address: Address) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(AesKey))
copyMem(addr value[16], unsafeAddr val[0], sizeof(AesKey))
copyMem(addr value[32], unsafeAddr val[32], sizeof(AesKey))
s.put(key, value)
func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(r))
value[16 .. 31] = r
value[32 .. ^1] = w
s.put(key, value)
func load*(s: var Sessions, id: NodeId, address: Address, r1, r2, w: var AesKey): bool =
let res = s.get(makeKey(id, address))
if res.isSome():
let val = res.get()
copyMem(addr r1[0], unsafeAddr val[0], sizeof(r1))
copyMem(addr r2[0], unsafeAddr val[sizeof(r1)], sizeof(r2))
copyMem(addr w[0], unsafeAddr val[sizeof(r1) + sizeof(r2)], sizeof(w))
return true
else:
return false
func del*(s: var Sessions, id: NodeId, address: Address) =
s.del(makeKey(id, address))
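A minimal usage sketch of the new session cache (illustrative only: the module paths, the Sessions.init LRU constructor from lru.nim, and the placeholder ids/keys are assumptions made for illustration):

import std/net
import stint
import ./node, ./sessions        # module paths are illustrative

var
  s = Sessions.init(256)         # LRU capacity chosen for illustration
  readKey, writeKey: AesKey      # would normally come from the handshake
let
  id: NodeId = 1.u256            # placeholder node id
  a = Address(ip: parseIpAddress("127.0.0.1"), port: Port(9000))

# Remember the keys negotiated with this node/address.
s.store(id, a, readKey, writeKey)

# On receive, try both candidate decryption keys.
var r1, r2, w: AesKey
if s.load(id, a, r1, r2, w):
  discard                        # decrypt with r1 first, fall back to r2

# If the second key turned out to be the right one, promote it.
s.swapr(id, a)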

View File

@ -6,10 +6,10 @@
#
import
chronicles,
std/[options, strutils, sugar],
pkg/stew/[results, byteutils, arrayops],
results,
std/[net, options, strutils, sugar],
pkg/stew/[byteutils, arrayops],
stew/endians2,
stew/shims/net,
stew/base64,
libp2p/crypto/crypto,
libp2p/crypto/secp,
@ -58,7 +58,7 @@ proc incSeqNo*(
proc update*(
r: var SignedPeerRecord,
pk: crypto.PrivateKey,
ip: Option[ValidIpAddress],
ip: Option[IpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()):
RecordResult[void] =
## Update a `SignedPeerRecord` with given ip address, tcp port, udp port and optional
@ -97,9 +97,8 @@ proc update*(
if udpPort.isNone and tcpPort.isNone:
return err "No existing address in SignedPeerRecord with no port provided"
let ipAddr = try: ValidIpAddress.init(ip.get)
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
let ipAddr = ip.get
if tcpPort.isSome:
transProto = IpTransportProtocol.tcpProtocol
transProtoPort = tcpPort.get
@ -123,9 +122,13 @@ proc update*(
.mapErr((e: string) => e.cstring)
existingIp =
if existingNetProtoFam == MultiCodec.codec("ip6"):
ipv6 array[16, byte].initCopyFrom(existingNetProtoAddr)
IpAddress(
family: IPv6, address_v6: array[16, byte].initCopyFrom(existingNetProtoAddr)
)
else:
ipv4 array[4, byte].initCopyFrom(existingNetProtoAddr)
IpAddress(
family: IPv4, address_v4: array[4, byte].initCopyFrom(existingNetProtoAddr)
)
ipAddr = ip.get(existingIp)
@ -223,7 +226,7 @@ proc init*(
T: type SignedPeerRecord,
seqNum: uint64,
pk: PrivateKey,
ip: Option[ValidIpAddress],
ip: Option[IpAddress],
tcpPort, udpPort: Option[Port]):
RecordResult[T] =
## Initialize a `SignedPeerRecord` with given sequence number, private key, optional
@ -238,9 +241,7 @@ proc init*(
tcpPort, udpPort
var
ipAddr = try: ValidIpAddress.init("127.0.0.1")
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
ipAddr = static parseIpAddress("127.0.0.1")
proto: IpTransportProtocol
protoPort: Port
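The hunks above migrate the SPR code from stew's ValidIpAddress to std/net's IpAddress. Below is a small illustrative sketch of the two construction forms the new code relies on (runtime parsing and direct construction from raw bytes); the values are for illustration only.

import std/net

# parse at runtime; raises ValueError on malformed input
let loopback = parseIpAddress("127.0.0.1")

# construct directly from raw bytes, as done above for addresses decoded from multiaddresses
let fromBytes = IpAddress(family: IpAddressFamily.IPv4, address_v4: [127'u8, 0, 0, 1])

assert loopback == fromBytes
assert loopback.family == IpAddressFamily.IPv4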

View File

@ -0,0 +1,299 @@
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Everything below the handling of ordinary messages
import
std/[net, tables, options, sets],
bearssl/rand,
chronos,
chronicles,
metrics,
libp2p/crypto/crypto,
"."/[node, encoding, sessions]
const
handshakeTimeout* = 500.milliseconds ## timeout for the reply on the
## whoareyou message
responseTimeout* = 1.seconds ## timeout for the response of a request-response
## call
logScope:
topics = "discv5 transport"
declarePublicCounter dht_transport_tx_packets,
"Discovery transport packets sent", labels = ["state"]
declarePublicCounter dht_transport_tx_bytes,
"Discovery transport bytes sent", labels = ["state"]
declarePublicCounter dht_transport_rx_packets,
"Discovery transport packets received", labels = ["state"]
declarePublicCounter dht_transport_rx_bytes,
"Discovery transport bytes received", labels = ["state"]
type
Transport* [Client] = ref object
client: Client
bindAddress: Address ## UDP binding address
transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, (PendingRequest, Moment)]
keyexchangeInProgress: HashSet[NodeId]
pendingRequestsByNode: Table[NodeId, seq[seq[byte]]]
codec*: Codec
rng: ref HmacDrbgContext
PendingRequest = object
node: Node
message: seq[byte]
proc sendToA(t: Transport, a: Address, msg: seq[byte]) =
trace "Send packet", myport = t.bindAddress.port, address = a
let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, msg)
f.addCallback(
proc(data: pointer) =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to our own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
dht_transport_tx_packets.inc(labelValues = ["failed"])
dht_transport_tx_bytes.inc(msg.len.int64, labelValues = ["failed"])
)
dht_transport_tx_packets.inc()
dht_transport_tx_bytes.inc(msg.len.int64)
proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome())
t.sendToA(n.address.get(), data)
proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message)
t.sendToA(toAddr, data)
# TODO: This could be improved to do the clean-up immediately in case a
# non-whoareyou response does arrive, but we would need to store the AuthTag
# somewhere.
proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, (request, Moment.now())):
sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce)
## TODO: remove dependence on message. This should be handled at a higher layer.
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message)
if haskey:
trace "Send message: has key", myport = t.bindAddress.port , dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
else:
# we don't have an encryption key for this target, so we should initiate keyexchange
if not (toNode.id in t.keyexchangeInProgress):
trace "Send message: send random to trigger Whoareyou", myport = t.bindAddress.port , dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.keyexchangeInProgress.incl(toNode.id)
trace "keyexchangeInProgress added", myport = t.bindAddress.port , dstId = toNode
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (timeout)", myport = t.bindAddress.port , dstId = toNode
else:
# delay sending this message until whoareyou is received and the handshake is sent;
# it has to be re-encoded once the keys are known
t.pendingRequestsByNode.mgetOrPut(toNode.id, newSeq[seq[byte]]()).add(message)
trace "Send message: Node with this id already has ongoing keyexchage, delaying packet",
myport = t.bindAddress.port , dstId = toNode, qlen=t.pendingRequestsByNode[toNode.id].len
proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
requestNonce: AESGCMNonce, node: Option[Node]) =
let key = HandshakeKey(nodeId: toId, address: a)
if not t.codec.hasHandshake(key):
let
recordSeq = if node.isSome(): node.get().record.seqNum
else: 0
pubkey = if node.isSome(): some(node.get().pubkey)
else: none(PublicKey)
let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
recordSeq, pubkey)
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
# handshake key is popped in decodeHandshakePacket. if not yet popped by timeout:
if t.codec.hasHandshake(key):
debug "Handshake timeout", myport = t.bindAddress.port , dstId = toId, address = a
t.codec.handshakes.del(key)
trace "Send whoareyou", dstId = toId, address = a
t.sendToA(a, data)
else:
# TODO: is it reasonable to drop it? Should we allow a mini-queue here?
# The queue should be on the sender side, as this is random-encoded!
debug "Node with this id already has ongoing handshake, queuing packet", myport = t.bindAddress.port , dstId = toId, address = a
proc sendPending(t:Transport, toNode: Node):
Future[void] {.async.} =
if t.pendingRequestsByNode.hasKey(toNode.id):
trace "Found pending request", myport = t.bindAddress.port, src = toNode, len = t.pendingRequestsByNode[toNode.id].len
for message in t.pendingRequestsByNode[toNode.id]:
trace "Sending pending packet", myport = t.bindAddress.port, dstId = toNode.id
let address = toNode.address.get()
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec, toNode.id, address, message)
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.pendingRequestsByNode.del(toNode.id)
proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
dht_transport_rx_packets.inc()
dht_transport_rx_bytes.inc(packet.len.int64)
let decoded = t.codec.decodePacket(a, packet)
if decoded.isOk:
let packet = decoded[]
case packet.flag
of OrdinaryMessage:
if packet.messageOpt.isSome():
let message = packet.messageOpt.get()
trace "Received message packet", myport = t.bindAddress.port, srcId = packet.srcId, address = a,
kind = message.kind, p = $packet
t.client.handleMessage(packet.srcId, a, message)
else:
trace "Not decryptable message packet received", myport = t.bindAddress.port,
srcId = packet.srcId, address = a
# If we already have a keyexchange in progress, we have a case of simultaneous cross-connect.
# We could try to decide here which should go on, but since we are on top of UDP, a more robust
# choice is to answer here and resolve conflicts in the next stage (reception of Whoareyou), or
# even later (reception of Handshake).
if packet.srcId in t.keyexchangeInProgress:
trace "cross-connect detected, still sending Whoareyou"
t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
t.client.getNode(packet.srcId))
of Flag.Whoareyou:
trace "Received whoareyou packet", myport = t.bindAddress.port, address = a
var
prt: (PendingRequest, Moment)
if t.pendingRequests.take(packet.whoareyou.requestNonce, prt):
let
pr = prt[0]
startTime = prt[1]
toNode = pr.node
rtt = Moment.now() - startTime
# trace "whoareyou RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
# This is a node we previously contacted and thus must have an address.
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let data = encodeHandshakePacket(
t.rng[],
t.codec,
toNode.id,
address,
pr.message,
packet.whoareyou,
toNode.pubkey
).expect("Valid handshake packet to encode")
trace "Send handshake message packet", myport = t.bindAddress.port, dstId = toNode.id, address
t.send(toNode, data)
# keyexchange ready, we can send queued packets
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (finished)", myport = t.bindAddress.port, dstId = toNode.id, address
discard t.sendPending(toNode)
else:
debug "Timed out or unrequested whoareyou packet", address = a
of HandshakeMessage:
trace "Received handshake message packet", myport = t.bindAddress.port, srcId = packet.srcIdHs,
address = a, kind = packet.message.kind
t.client.handleMessage(packet.srcIdHs, a, packet.message)
# For a handshake message it is possible that we received a newer SPR.
# In that case we can add/update it in the routing table.
if packet.node.isSome():
let node = packet.node.get()
# Let's not add nodes without a correct IP in the SPR to the routing table.
# The SPR could contain bogus IPs and, although they would get removed
# on the next revalidation, one could spam these since the handshake
# message occurs on (first) incoming messages.
if node.address.isSome() and a == node.address.get():
# TODO: maybe here we could verify that the address matches what we were
# sending the 'whoareyou' message to. In that case, we can set 'seen'
# TODO: verify how this works with restrictive NAT and firewall scenarios.
node.registerSeen()
if t.client.addNode(node):
trace "Added new node to routing table after handshake", node, tablesize=t.client.nodesDiscovered()
discard t.sendPending(node)
else:
trace "address mismatch, not adding seen flag", node, address = a, nodeAddress = node.address.get()
else:
dht_transport_rx_packets.inc(labelValues = ["failed_decode"])
dht_transport_rx_bytes.inc(packet.len.int64, labelValues = ["failed_decode"])
trace "Packet decoding error", myport = t.bindAddress.port, error = decoded.error, address = a
proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
Future[void] {.async.} =
let t = getUserData[Transport[T]](transp)
# TODO: should we use `peekMessage()` to avoid allocation?
let buf = try:
transp.getMessage()
except TransportOsError as e:
# This is likely to be local network connection issues.
warn "Transport getMessage", exception = e.name, msg = e.msg
return
let ip = try: raddr.address()
except ValueError as e:
error "Not a valid IpAddress", exception = e.name, msg = e.msg
return
let a = Address(ip: ip, port: raddr.port)
t.receive(a, buf)
proc open*[T](t: Transport[T]) {.raises: [Defect, CatchableError].} =
info "Starting transport", bindAddress = t.bindAddress
# TODO allow binding to specific IP / IPv6 / etc
let ta = initTAddress(t.bindAddress.ip, t.bindAddress.port)
t.transp = newDatagramTransport(processClient[T], udata = t, local = ta)
proc close*(t: Transport) =
t.transp.close
proc closed*(t: Transport) : bool =
t.transp.closed
proc closeWait*(t: Transport) {.async.} =
await t.transp.closeWait
proc newTransport*[T](
client: T,
privKey: PrivateKey,
localNode: Node,
bindPort: Port,
bindIp = IPv4_any(),
rng = newRng()): Transport[T]=
# TODO Consider whether this should be a Defect
doAssert rng != nil, "RNG initialization failed"
Transport[T](
client: client,
bindAddress: Address(ip: bindIp, port: bindPort),
codec: Codec(
localNode: localNode,
privKey: privKey,
sessions: Sessions.init(256)),
rng: rng)
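Compared with the removed transport further down, the rewritten one serialises key exchanges per node: the first packet sent without a session key is random-encoded to trigger a whoareyou handshake, and any further messages to that node are parked in pendingRequestsByNode until the handshake finishes or handshakeTimeout clears the flag. A standalone sketch of that decision, with hypothetical names and a caller-supplied send procedure:

import std/[tables, sets]

type NodeId = int

var
  keyexchangeInProgress: HashSet[NodeId]
  pendingByNode: Table[NodeId, seq[seq[byte]]]

proc sendOrQueue(id: NodeId, msg: seq[byte], haveKey: bool,
                 send: proc(m: seq[byte])) =
  if haveKey:
    send(msg)                       # session exists: encrypt and send directly
  elif id notin keyexchangeInProgress:
    keyexchangeInProgress.incl(id)  # first packet starts the handshake
    send(msg)                       # random-encoded packet triggers whoareyou
  else:
    pendingByNode.mgetOrPut(id, @[]).add(msg)  # park until keys are ready

proc onHandshakeDone(id: NodeId, send: proc(m: seq[byte])) =
  keyexchangeInProgress.excl(id)
  for msg in pendingByNode.getOrDefault(id):
    send(msg)                       # re-encode and flush the queued messages
  pendingByNode.del(id)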

View File

@ -1,25 +1,6 @@
import std/os
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]
switch("define", "libp2p_pki_schemes=secp256k1")
task testAll, "Run DHT tests":
exec "nim c -r tests/testAll.nim"
task test, "Run DHT tests":
testAllTask()
when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and
# BEWARE
# In Nim 1.6, config files are evaluated with a working directory
# matching where the Nim command was invoked. This means that we
# must do all file existence checks with full absolute paths:
system.fileExists(currentDir & "nimbus-build-system.paths"):
echo "Using Nimbus Paths"
include "nimbus-build-system.paths"
elif fileExists("nimble.paths"):
echo "Using Nimble Paths"
# begin Nimble config (version 1)
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
include "nimble.paths"
# end Nimble config
# end Nimble config

7
env.sh
View File

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh

View File

@ -1,30 +0,0 @@
import
std/sugar,
libp2p/crypto/[crypto, secp]
from secp256k1 import ecdhRaw, SkEcdhRawSecret, toRaw
proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
let skKey = ? SkPrivateKey.init(data).mapErr(e =>
("Failed to init private key from hex string: " & $e).cstring)
ok PrivateKey.init(skKey)
proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
let skKey = ? SkPublicKey.init(data).mapErr(e =>
("Failed to init public key from hex string: " & $e).cstring)
ok PublicKey.init(skKey)
func ecdhRaw*(seckey: SkPrivateKey, pubkey: SkPublicKey): SkEcdhRawSecret {.borrow.}
proc ecdhRaw*(
priv: PrivateKey,
pub: PublicKey): Result[SkEcdhRawSecret, cstring] =
# TODO: Do we need to support non-secp256k1 schemes?
if priv.scheme != Secp256k1 or pub.scheme != Secp256k1:
return err "Must use secp256k1 scheme".cstring
ok ecdhRaw(priv.skkey, pub.skkey)
proc toRaw*(pubkey: PublicKey): seq[byte] =
secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]

View File

@ -1,62 +0,0 @@
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
#
## Session cache as mentioned at
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
##
{.push raises: [Defect].}
import
std/options,
stint, stew/endians2, stew/shims/net,
node, lru
export lru
const
aesKeySize* = 128 div 8
keySize = sizeof(NodeId) +
16 + # max size of ip address (ipv6)
2 # Sizeof port
type
AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte]
SessionValue* = array[sizeof(AesKey) + sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue]
func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0
result[pos ..< pos+sizeof(id)] = toBytes(id)
pos.inc(sizeof(id))
case address.ip.family
of IpAddressFamily.IpV4:
result[pos ..< pos+sizeof(address.ip.address_v4)] = address.ip.address_v4
of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytes(address.port.uint16)
func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[sizeof(r) + sizeof(w), byte]
value[0 .. 15] = r
value[16 .. ^1] = w
s.put(makeKey(id, address), value)
func load*(s: var Sessions, id: NodeId, address: Address, r, w: var AesKey): bool =
let res = s.get(makeKey(id, address))
if res.isSome():
let val = res.get()
copyMem(addr r[0], unsafeAddr val[0], sizeof(r))
copyMem(addr w[0], unsafeAddr val[sizeof(r)], sizeof(w))
return true
else:
return false
func del*(s: var Sessions, id: NodeId, address: Address) =
s.del(makeKey(id, address))

View File

@ -1,217 +0,0 @@
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Everything below the handling of ordinary messages
import
std/[tables, options],
bearssl/rand,
chronos,
chronicles,
libp2p/crypto/crypto,
stew/shims/net,
"."/[node, encoding, sessions]
const
handshakeTimeout* = 2.seconds ## timeout for the reply on the
## whoareyou message
responseTimeout* = 4.seconds ## timeout for the response of a request-response
## call
type
Transport* [Client] = ref object
client: Client
bindAddress: Address ## UDP binding address
transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, PendingRequest]
codec*: Codec
rng: ref HmacDrbgContext
PendingRequest = object
node: Node
message: seq[byte]
proc sendToA(t: Transport, a: Address, data: seq[byte]) =
let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, data)
f.callback = proc(data: pointer) {.gcsafe.} =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to our own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome())
t.sendToA(n.address.get(), data)
proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message)
t.sendToA(toAddr, data)
# TODO: This could be improved to do the clean-up immediately in case a
# non-whoareyou response does arrive, but we would need to store the AuthTag
# somewhere.
proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, request):
sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce)
## TODO: remove dependence on message. This should be handled at a higher layer.
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let (data, nonce) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message)
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
requestNonce: AESGCMNonce, node: Option[Node]) =
let key = HandshakeKey(nodeId: toId, address: a)
if not t.codec.hasHandshake(key):
let
recordSeq = if node.isSome(): node.get().record.seqNum
else: 0
pubkey = if node.isSome(): some(node.get().pubkey)
else: none(PublicKey)
let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
recordSeq, pubkey)
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
# TODO: should we still provide cancellation in case handshake completes
# correctly?
t.codec.handshakes.del(key)
trace "Send whoareyou", dstId = toId, address = a
t.sendToA(a, data)
else:
debug "Node with this id already has ongoing handshake, ignoring packet"
proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
let decoded = t.codec.decodePacket(a, packet)
if decoded.isOk:
let packet = decoded[]
case packet.flag
of OrdinaryMessage:
if packet.messageOpt.isSome():
let message = packet.messageOpt.get()
trace "Received message packet", srcId = packet.srcId, address = a,
kind = message.kind, p = $packet
t.client.handleMessage(packet.srcId, a, message)
else:
trace "Not decryptable message packet received",
srcId = packet.srcId, address = a
t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
t.client.getNode(packet.srcId))
of Flag.Whoareyou:
trace "Received whoareyou packet", address = a
var pr: PendingRequest
if t.pendingRequests.take(packet.whoareyou.requestNonce, pr):
let toNode = pr.node
# This is a node we previously contacted and thus must have an address.
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let data = encodeHandshakePacket(
t.rng[],
t.codec,
toNode.id,
address,
pr.message,
packet.whoareyou,
toNode.pubkey
).expect("Valid handshake packet to encode")
trace "Send handshake message packet", dstId = toNode.id, address
t.send(toNode, data)
else:
debug "Timed out or unrequested whoareyou packet", address = a
of HandshakeMessage:
trace "Received handshake message packet", srcId = packet.srcIdHs,
address = a, kind = packet.message.kind
t.client.handleMessage(packet.srcIdHs, a, packet.message)
# For a handshake message it is possible that we received a newer SPR.
# In that case we can add/update it in the routing table.
if packet.node.isSome():
let node = packet.node.get()
# Let's not add nodes without a correct IP in the SPR to the routing table.
# The SPR could contain bogus IPs and, although they would get removed
# on the next revalidation, one could spam these since the handshake
# message occurs on (first) incoming messages.
if node.address.isSome() and a == node.address.get():
# TODO: maybe here we could verify that the address matches what we were
# sending the 'whoareyou' message to. In that case, we can set 'seen'
node.seen = true
if t.client.addNode(node):
trace "Added new node to routing table after handshake", node
else:
trace "Packet decoding error", error = decoded.error, address = a
proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
Future[void] {.async.} =
let t = getUserData[Transport[T]](transp)
# TODO: should we use `peekMessage()` to avoid allocation?
let buf = try: transp.getMessage()
except TransportOsError as e:
# This is likely to be local network connection issues.
warn "Transport getMessage", exception = e.name, msg = e.msg
return
let ip = try: raddr.address()
except ValueError as e:
error "Not a valid IpAddress", exception = e.name, msg = e.msg
return
let a = Address(ip: ValidIpAddress.init(ip), port: raddr.port)
t.receive(a, buf)
proc open*[T](t: Transport[T]) {.raises: [Defect, CatchableError].} =
info "Starting transport", bindAddress = t.bindAddress
# TODO allow binding to specific IP / IPv6 / etc
let ta = initTAddress(t.bindAddress.ip, t.bindAddress.port)
t.transp = newDatagramTransport(processClient[T], udata = t, local = ta)
proc close*(t: Transport) =
t.transp.close
proc closed*(t: Transport) : bool =
t.transp.closed
proc closeWait*(t: Transport) {.async.} =
await t.transp.closeWait
proc newTransport*[T](
client: T,
privKey: PrivateKey,
localNode: Node,
bindPort: Port,
bindIp = IPv4_any(),
rng = newRng()): Transport[T]=
# TODO Consider whether this should be a Defect
doAssert rng != nil, "RNG initialization failed"
Transport[T](
client: client,
bindAddress: Address(ip: ValidIpAddress.init(bindIp), port: bindPort),
codec: Codec(
localNode: localNode,
privKey: privKey,
sessions: Sessions.init(256)),
rng: rng)

View File

@ -1,335 +0,0 @@
{
"version": 2,
"packages": {
"nim": {
"version": "1.6.14",
"vcsRevision": "71ba2e7f3c5815d956b1ae0341b0743242b8fec6",
"url": "https://github.com/nim-lang/Nim.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f9ce6fa986a4e75514fe26d4c773789b8897eb18"
}
},
"unittest2": {
"version": "0.0.2",
"vcsRevision": "02c49b8a994dd3f9eddfaab45262f9b8fa507f8e",
"url": "https://github.com/status-im/nim-unittest2.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a7f3331cabb5fad0d04c93be0aad1f020f9c8033"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "e18f5a62af2ade7a1fd1d39635d4e04d944def08",
"url": "https://github.com/status-im/nim-stew.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "2a80972f66597bf87d820dca8164d89d3bb24c6d"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
"url": "https://github.com/cheatfate/nimcrypto.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
"url": "https://github.com/status-im/nim-secp256k1.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
}
},
"bearssl": {
"version": "0.1.5",
"vcsRevision": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab",
"url": "https://github.com/status-im/nim-bearssl.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "dabf4aaac8969fb10281ebd9ff51875d37eeaaa9"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "e88e231dfcef4585fe3b2fbd9b664dbd28a88040",
"url": "https://github.com/status-im/nim-http-utils.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "dd0dcef76616ad35922944671c49222c8a17fb1f"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686",
"url": "https://github.com/status-im/nim-chronos.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "8cdf821ecc76fb91fdfb5191cad31f813822fcb2"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5",
"url": "https://github.com/status-im/nim-metrics.git",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "6274c7ae424b871bc21ca3a6b6713971ff6a8095"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
"url": "https://github.com/status-im/nim-faststreams.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"testutils",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "493d18b8292fc03aa4f835fd825dea1183f97466",
"url": "https://github.com/status-im/nim-serialization.git",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "893921d41eb4e90a635442f02dd17b5f90bcbb00"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4",
"url": "https://github.com/status-im/nim-json-serialization.git",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "bdcdeefca4e2b31710a23cc817aa6abfa0d041e2"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "7631f7b2ee03398cb1512a79923264e8f9410af6",
"url": "https://github.com/status-im/nim-chronicles.git",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "2b6795cc40a687d3716b617e70d96e5af361c4af"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180",
"url": "https://github.com/status-im/nim-websock.git",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "d27f126527be59f5a0dc35303cb37b82d4e2770b"
}
},
"libp2p": {
"version": "1.0.0",
"vcsRevision": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702",
"url": "https://github.com/status-im/nim-libp2p.git",
"downloadMethod": "git",
"dependencies": [
"nimcrypto",
"dnsclient",
"bearssl",
"chronicles",
"chronos",
"metrics",
"secp256k1",
"stew",
"websock"
],
"checksums": {
"sha1": "65e473566f19f7f9a3529745e7181fb58d30b5ef"
}
},
"combparser": {
"version": "0.2.0",
"vcsRevision": "ba4464c005d7617c008e2ed2ebc1ba52feb469c6",
"url": "https://github.com/PMunch/combparser.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a3635260961a893b88f69aac19f1b24e032a7e97"
}
},
"asynctest": {
"version": "0.3.2",
"vcsRevision": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7",
"url": "https://github.com/status-im/asynctest.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "0ef50d086659835b0a23a4beb77cb11747695448"
}
},
"questionable": {
"version": "0.10.6",
"vcsRevision": "30e4184a99c8c1ba329925912d2c5d4b09acf8cc",
"url": "https://github.com/status-im/questionable.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "ca2d1e2e0be6566b4bf13261b29645721d01673d"
}
},
"upraises": {
"version": "0.1.0",
"vcsRevision": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2",
"url": "https://github.com/markspanbroek/upraises.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a0243c8039e12d547dbb2e9c73789c16bb8bc956"
}
},
"sqlite3_abi": {
"version": "3.40.1.1",
"vcsRevision": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "8e91db8156a82383d9c48f53b33e48f4e93077b1"
}
},
"protobuf_serialization": {
"version": "0.2.0",
"vcsRevision": "27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a",
"url": "https://github.com/status-im/nim-protobuf-serialization",
"downloadMethod": "git",
"dependencies": [
"stew",
"faststreams",
"serialization",
"combparser"
],
"checksums": {
"sha1": "9c30c45b92900b425b147aeceae87bee6295dd80"
}
},
"datastore": {
"version": "0.0.1",
"vcsRevision": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa",
"url": "https://github.com/status-im/nim-datastore",
"downloadMethod": "git",
"dependencies": [
"asynctest",
"chronos",
"questionable",
"sqlite3_abi",
"stew",
"unittest2",
"upraises"
],
"checksums": {
"sha1": "2c03bb47de97962d2a64be1ed0a8161cd9d65159"
}
},
"stint": {
"version": "0.0.1",
"vcsRevision": "036c71d06a6b22f8f967ba9d54afd2189c3872ca",
"url": "https://github.com/status-im/nim-stint",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "0f187a2115315ca898e5f9a30c5e506cf6057062"
}
}
},
"tasks": {}
}

73
tests/build.nims Normal file
View File

@ -0,0 +1,73 @@
import std / [os, strutils, sequtils]
task testAll, "Run DHT tests":
exec "nim c -r test.nim"
rmFile "./test"
task compileParallelTests, "Compile parallel tests":
exec "nim c --hints:off --verbosity:0 dht/test_providers.nim"
exec "nim c --hints:off --verbosity:0 dht/test_providermngr.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim"
task test, "Run DHT tests":
# compile with trace logging to make sure it doesn't crash
exec "nim c -d:testsAll -d:chronicles_enabled=on -d:chronicles_log_level=TRACE test.nim"
rmFile "./test"
compileParallelTestsTask()
exec "nim c -r -d:testsAll --verbosity:0 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart1, "Run DHT tests A":
compileParallelTestsTask()
exec "nim c -r -d:testsPart1 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart2, "Run DHT tests B":
compileParallelTestsTask()
exec "nim c -r -d:testsPart2 testAllParallel.nim"
rmFile "./testAllParallel"
task coverage, "generates code coverage report":
var (output, exitCode) = gorgeEx("which lcov")
if exitCode != 0:
echo ""
echo " ************************** ⛔️ ERROR ⛔️ **************************"
echo " ** **"
echo " ** ERROR: lcov not found, it must be installed to run code **"
echo " ** coverage locally **"
echo " ** **"
echo " *****************************************************************"
echo ""
quit 1
(output, exitCode) = gorgeEx("gcov --version")
if output.contains("Apple LLVM"):
echo ""
echo " ************************* ⚠️ WARNING ⚠️ *************************"
echo " ** **"
echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
echo " ** emulates an old version of gcov (4.2.0) and therefore **"
echo " ** coverage results will differ than those on CI (which **"
echo " ** uses a much newer version of gcov). **"
echo " ** **"
echo " *****************************************************************"
echo ""
var nimSrcs = ""
for f in walkDirRec(".", {pcFile}):
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== "
exec("nim c -r coverage.nim")
exec("rm nimcache/*.c")
rmDir("coverage"); mkDir("coverage")
echo " ======== Running LCOV ======== "
exec("lcov --capture --directory nimcache --output-file coverage/coverage.info")
exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")

View File

@ -1,20 +1,17 @@
import
std/net,
bearssl/rand,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
libp2pdht/discv5/[node, routing_table, spr],
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
stew/shims/net
export net
codexdht/discv5/[node, routing_table, spr],
codexdht/discv5/protocol as discv5_protocol
proc localAddress*(port: int): Address =
Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
Address(ip: IPv4_loopback(), port: Port(port))
proc example*(T: type PrivateKey, rng: ref HmacDrbgContext): PrivateKey =
PrivateKey.random(rng[]).expect("Valid rng for private key")
PrivateKey.random(PKScheme.Secp256k1, rng[]).expect("Valid rng for private key")
proc example*(T: type NodeId, rng: ref HmacDrbgContext): NodeId =
let
@ -53,8 +50,8 @@ proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool =
for n in nodes:
if id == n.id: return true
proc generateNode*(privKey: PrivateKey, port: int = 20302,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
proc generateNode*(privKey: PrivateKey, port: int,
ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
let
port = Port(port)
@ -67,51 +64,51 @@ proc generateNRandomNodes*(rng: ref HmacDrbgContext, n: int): seq[Node] =
for i in 1..n:
let
privKey = PrivateKey.example(rng)
node = privKey.generateNode()
node = privKey.generateNode(port = 20402 + 10*n)
res.add(node)
res
proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): (Node, PrivateKey) =
ip: IpAddress = parseIpAddress("127.0.0.1")): (Node, PrivateKey) =
while true:
let
privKey = PrivateKey.random(rng).expect("Valid rng for private key")
node = privKey.generateNode(ip = ip)
node = privKey.generateNode(port = 21302 + 10*d.int, ip = ip)
if logDistance(n.id, node.id) == d:
return (node, privKey)
proc nodeAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
let (node, _) = n.nodeAndPrivKeyAtDistance(rng, d, ip)
node
proc nodesAtDistance*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
for i in 0..<amount:
result.add(nodeAtDistance(n, rng, d, ip))
proc nodesAtDistanceUniqueIp*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
var ta = initTAddress(ip, Port(0))
for i in 0..<amount:
ta.inc()
result.add(nodeAtDistance(n, rng, d, ValidIpAddress.init(ta.address())))
result.add(nodeAtDistance(n, rng, d, ta.address()))
proc addSeenNode*(d: discv5_protocol.Protocol, n: Node): bool =
# Add it as a seen node, warning: for testing convenience only!
n.seen = true
n.registerSeen()
d.addNode(n)
func udpExample*(_: type MultiAddress): MultiAddress =
## creates a new udp multiaddress on a random port
Multiaddress.init("/ip4/0.0.0.0/udp/0")
## creates a new udp MultiAddress on a random port
MultiAddress.init("/ip4/0.0.0.0/udp/0")
func udpExamples*(_: type MultiAddress, count: int): seq[MultiAddress] =
var res: seq[MultiAddress] = @[]
for i in 1..count:
res.add Multiaddress.init("/ip4/0.0.0.0/udp/" & $i).get
res.add MultiAddress.init("/ip4/0.0.0.0/udp/" & $i).get
return res
proc toSignedPeerRecord*(privKey: PrivateKey) : SignedPeerRecord =

View File

@ -2,15 +2,14 @@
import std/sequtils
import pkg/chronos
import pkg/asynctest
import pkg/asynctest/chronos/unittest
import pkg/datastore
import pkg/libp2p
from pkg/libp2p import PeerId
import libp2pdht/dht
import libp2pdht/private/eth/p2p/discoveryv5/spr
import libp2pdht/private/eth/p2p/discoveryv5/providers
import libp2pdht/discv5/node
import libp2pdht/private/eth/p2p/discoveryv5/lru
import codexdht/private/eth/p2p/discoveryv5/spr
import codexdht/private/eth/p2p/discoveryv5/providers
import codexdht/discv5/node
import codexdht/private/eth/p2p/discoveryv5/lru
import ./test_helper
suite "Test Providers Manager simple":
@ -101,10 +100,10 @@ suite "Test Providers Manager multiple":
not (await manager.contains(nodeIds[49]))
not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId":
(await (manager.remove(providers[0].data.peerId))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet
test "Should remove by PeerId with associated keys":
(await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId, true))).tryGet
for id in nodeIds:
check:
@ -117,6 +116,22 @@ suite "Test Providers Manager multiple":
not (await manager.contains(providers[5].data.peerId))
not (await manager.contains(providers[9].data.peerId))
test "Should not return keys without provider":
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 10
for provider in providers:
(await (manager.remove(provider.data.peerId))).tryGet
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 0
for provider in providers:
check:
not (await manager.contains(provider.data.peerId))
suite "Test providers with cache":
let
rng = newRng()
@ -165,9 +180,9 @@ suite "Test providers with cache":
not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId":
(await (manager.remove(providers[0].data.peerId))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet
(await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId, true))).tryGet
for id in nodeIds:
check:
@ -219,6 +234,24 @@ suite "Test Provider Maintenance":
for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0
test "Should not cleanup unexpired":
let
unexpired = PrivateKey.example(rng).toSignedPeerRecord()
(await manager.add(nodeIds[0], unexpired, ttl = 1.minutes)).tryGet
await sleepAsync(500.millis)
await manager.store.cleanupExpired()
let
unexpiredProvs = (await manager.get(nodeIds[0])).tryGet
check:
unexpiredProvs.len == 1
await (unexpired.data.peerId in manager)
(await manager.remove(nodeIds[0])).tryGet
test "Should cleanup orphaned":
for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0

View File

@ -10,18 +10,15 @@
{.used.}
import
std/[options, sequtils],
asynctest,
std/[options],
asynctest/chronos/unittest2,
bearssl/rand,
chronicles,
chronos,
nimcrypto,
libp2p/crypto/[crypto, secp],
libp2p/[multiaddress, multicodec, multihash, routing_record, signed_envelope],
libp2pdht/dht,
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
stew/byteutils,
codexdht/discv5/crypto as dhtcrypto,
codexdht/discv5/protocol as discv5_protocol,
test_helper
proc bootstrapNodes(
@ -34,7 +31,7 @@ proc bootstrapNodes(
debug "---- STARTING BOOSTRAPS ---"
for i in 0..<nodecount:
let privKey = PrivateKey.example(rng)
let node = initDiscoveryNode(rng, privKey, localAddress(20302 + i), bootnodes)
let node = initDiscoveryNode(rng, privKey, localAddress(23302 + i), bootnodes)
await node.start()
result.add((node, privKey))
if delay > 0:
@ -53,13 +50,13 @@ proc bootstrapNetwork(
bootNodeKey = PrivateKey.fromHex(
"a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")
.expect("Valid private key hex")
bootNodeAddr = localAddress(20301)
bootNodeAddr = localAddress(25311)
bootNode = initDiscoveryNode(rng, bootNodeKey, bootNodeAddr, @[]) # just a shortcut for new and open
#waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above
var res = await bootstrapNodes(nodecount - 1,
@[bootnode.localNode.record],
@[bootNode.localNode.record],
rng,
delay)
res.insert((bootNode, bootNodeKey), 0)
@ -125,7 +122,6 @@ suite "Providers Tests: node alone":
debug "Providers:", providers
check (providers.len == 0)
suite "Providers Tests: two nodes":
var

View File

@ -2,12 +2,12 @@
import
std/tables,
chronos, chronicles, stint, asynctest, stew/shims/net,
chronos, chronicles, stint, asynctest/chronos/unittest,
stew/byteutils, bearssl/rand,
libp2p/crypto/crypto,
libp2pdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
codexdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
codexdht/discv5/crypto as dhtcrypto,
codexdht/discv5/protocol as discv5_protocol,
../dht/test_helper
suite "Discovery v5 Tests":
@ -22,13 +22,13 @@ suite "Discovery v5 Tests":
pk = PrivateKey.example(rng)
targetPk = PrivateKey.example(rng)
node = initDiscoveryNode(rng, pk, localAddress(20302))
targetNode = targetPk.generateNode()
targetNode = targetPk.generateNode(port=26302)
check node.addNode(targetNode)
for i in 0..<1000:
let pk = PrivateKey.example(rng)
discard node.addNode(pk.generateNode())
discard node.addNode(pk.generateNode(port=27302+i))
let n = node.getNode(targetNode.id)
check n.isSome()
@ -265,7 +265,7 @@ suite "Discovery v5 Tests":
# Generate 1000 random nodes and add to our main node's routing table
for i in 0..<1000:
discard mainNode.addSeenNode(generateNode(PrivateKey.example(rng))) # for testing only!
discard mainNode.addSeenNode(generateNode(PrivateKey.example(rng), port=28302+i)) # for testing only!
let
neighbours = mainNode.neighbours(mainNode.localNode.id)
@ -287,7 +287,7 @@ suite "Discovery v5 Tests":
await mainNode.closeWait()
await testNode.closeWait()
proc testLookupTargets(fast: bool = false) {.async.} =
proc testLookupTargets(fast: bool = false): Future[bool] {.async.} =
const
nodeCount = 17
@ -306,9 +306,9 @@ suite "Discovery v5 Tests":
for t in nodes:
if n != t:
let pong = await n.ping(t.localNode)
check pong.isOk()
if pong.isErr():
echo pong.error
return false
# check (await n.ping(t.localNode)).isOk()
for i in 1 ..< nodeCount:
@ -318,16 +318,19 @@ suite "Discovery v5 Tests":
let target = nodes[i]
let discovered = await nodes[nodeCount-1].lookup(target.localNode.id, fast = fast)
debug "Lookup result", target = target.localNode, discovered
check discovered[0] == target.localNode
if discovered[0] != target.localNode:
return false
for node in nodes:
await node.closeWait()
return true
test "Lookup targets":
await testLookupTargets()
check await testLookupTargets()
test "Lookup targets using traditional findNode":
await testLookupTargets(fast = true)
check await testLookupTargets(fast = true)
test "Resolve target":
let
@ -412,31 +415,37 @@ suite "Discovery v5 Tests":
await mainNode.closeWait()
await lookupNode.closeWait()
# We no longer support field filtering
# test "Random nodes with spr field filter":
# let
# lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
# targetNode = generateNode(PrivateKey.example(rng))
# otherNode = generateNode(PrivateKey.example(rng))
# anotherNode = generateNode(PrivateKey.example(rng))
test "Random nodes, also with filter":
let
lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
targetNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20302))
otherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20303))
anotherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20304))
# check:
# lookupNode.addNode(targetNode)
# lookupNode.addNode(otherNode)
# lookupNode.addNode(anotherNode)
check:
lookupNode.addNode(targetNode.localNode.record)
lookupNode.addNode(otherNode.localNode.record)
lookupNode.addNode(anotherNode.localNode.record)
# let discovered = lookupNode.randomNodes(10)
# check discovered.len == 3
# let discoveredFiltered = lookupNode.randomNodes(10,
# ("test", @[byte 1,2,3,4]))
# check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode)
let discovered = lookupNode.randomNodes(10)
check discovered.len == 3
let discoveredFiltered = lookupNode.randomNodes(10,
proc(n: Node) : bool = n.address.get.port == Port(20302))
check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode.localNode)
let discoveredEmpty = lookupNode.randomNodes(10,
proc(n: Node) : bool = n.address.get.port == Port(20305))
check discoveredEmpty.len == 0
await lookupNode.closeWait()
await targetNode.closeWait()
await otherNode.closeWait()
await anotherNode.closeWait()
# await lookupNode.closeWait()
test "New protocol with spr":
let
privKey = PrivateKey.example(rng)
ip = some(ValidIpAddress.init("127.0.0.1"))
ip = some(parseIpAddress("127.0.0.1"))
port = Port(20301)
node = newProtocol(privKey, ip, some(port), some(port), bindPort = port,
rng = rng)
@ -531,7 +540,7 @@ suite "Discovery v5 Tests":
let
port = Port(9000)
fromNoderecord = SignedPeerRecord.init(1, PrivateKey.example(rng),
some(ValidIpAddress.init("11.12.13.14")),
some(parseIpAddress("11.12.13.14")),
some(port), some(port))[]
fromNode = newNode(fromNoderecord)[]
privKey = PrivateKey.example(rng)
@ -543,7 +552,7 @@ suite "Discovery v5 Tests":
block: # Duplicates
let
record = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
1, privKey, some(parseIpAddress("12.13.14.15")),
some(port), some(port))[]
# Exact duplicates
@ -553,7 +562,7 @@ suite "Discovery v5 Tests":
# Node id duplicates
let recordSameId = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("212.13.14.15")),
1, privKey, some(parseIpAddress("212.13.14.15")),
some(port), some(port))[]
records.add(recordSameId)
nodes = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -562,7 +571,7 @@ suite "Discovery v5 Tests":
block: # No address
let
recordNoAddress = SignedPeerRecord.init(
1, privKey, none(ValidIpAddress), some(port), some(port))[]
1, privKey, none(IpAddress), some(port), some(port))[]
records = [recordNoAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
check test.len == 0
@ -570,7 +579,7 @@ suite "Discovery v5 Tests":
block: # Invalid address - site local
let
recordInvalidAddress = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("10.1.2.3")),
1, privKey, some(parseIpAddress("10.1.2.3")),
some(port), some(port))[]
records = [recordInvalidAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -579,7 +588,7 @@ suite "Discovery v5 Tests":
block: # Invalid address - loopback
let
recordInvalidAddress = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("127.0.0.1")),
1, privKey, some(parseIpAddress("127.0.0.1")),
some(port), some(port))[]
records = [recordInvalidAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -588,7 +597,7 @@ suite "Discovery v5 Tests":
block: # Invalid distance
let
recordInvalidDistance = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
1, privKey, some(parseIpAddress("12.13.14.15")),
some(port), some(port))[]
records = [recordInvalidDistance]
test = verifyNodesRecords(records, fromNode, limit, @[0'u16])
@ -597,7 +606,7 @@ suite "Discovery v5 Tests":
block: # Invalid distance but distance validation is disabled
let
recordInvalidDistance = SignedPeerRecord.init(
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
1, privKey, some(parseIpAddress("12.13.14.15")),
some(port), some(port))[]
records = [recordInvalidDistance]
test = verifyNodesRecords(records, fromNode, limit)
@ -624,12 +633,12 @@ suite "Discovery v5 Tests":
let
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
let (packet, _) = encodeMessagePacket(rng[], codec,
let (packet, _, _) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
@ -653,13 +662,13 @@ suite "Discovery v5 Tests":
let
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
for i in 0 ..< 5:
let a = localAddress(20303 + i)
let (packet, _) = encodeMessagePacket(rng[], codec,
let (packet, _, _) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
@ -684,14 +693,14 @@ suite "Discovery v5 Tests":
a = localAddress(20303)
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
var firstRequestNonce: AESGCMNonce
for i in 0 ..< 5:
let (packet, requestNonce) = encodeMessagePacket(rng[], codec,
let (packet, requestNonce, _) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
if i == 0:

View File

@ -2,14 +2,13 @@
import
std/[options, sequtils, tables],
asynctest/unittest2,
asynctest/chronos/unittest2,
bearssl/rand,
chronos,
libp2p/crypto/secp,
libp2pdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
libp2pdht/discv5/crypto,
codexdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
codexdht/discv5/crypto,
stew/byteutils,
stew/shims/net,
stint,
../dht/test_helper
@ -275,12 +274,12 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
let
enrRecA = SignedPeerRecord.init(1, privKeyA,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly intialized private key")
enrRecB = SignedPeerRecord.init(1, privKeyB,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly intialized private key")
nodeA = newNode(enrRecA).expect("Properly initialized record")
nodeB = newNode(enrRecB).expect("Properly initialized record")
@ -508,12 +507,12 @@ suite "Discovery v5.1 Additional Encode/Decode":
let
enrRecA = SignedPeerRecord.init(1, privKeyA,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly intialized private key")
enrRecB = SignedPeerRecord.init(1, privKeyB,
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly intialized private key")
nodeA = newNode(enrRecA).expect("Properly initialized record")
nodeB = newNode(enrRecB).expect("Properly initialized record")
@ -526,7 +525,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
reqId = RequestId.init(rng[])
message = encodeMessage(m, reqId)
let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
nodeB.address.get(), message)
let decoded = codecB.decodePacket(nodeA.address.get(), data)
@ -642,7 +641,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
codecB.sessions.store(nodeA.id, nodeA.address.get(), secrets.initiatorKey,
secrets.recipientKey)
let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
nodeB.address.get(), message)
let decoded = codecB.decodePacket(nodeA.address.get(), data)

6
tests/test.nim Normal file
View File

@ -0,0 +1,6 @@
import ./dht/test_providers
import ./dht/test_providermngr
import ./discv5/test_discoveryv5
import ./discv5/test_discoveryv5_encoding
{.warning[UnusedImport]: off.}

13
tests/test.nimble Normal file
View File

@ -0,0 +1,13 @@
# Package
version = "0.4.0"
author = "Status Research & Development GmbH"
description = "Tests for Logos Storage DHT"
license = "MIT"
installFiles = @["build.nims"]
# Dependencies
requires "asynctest >= 0.5.2 & < 0.6.0"
requires "unittest2 <= 0.0.9"
include "build.nims"

View File

@ -1,5 +0,0 @@
import
./dht/[test_providers, test_providermngr],
./discv5/[test_discoveryv5, test_discoveryv5_encoding]
{.warning[UnusedImport]: off.}

22
tests/testAllParallel.nim Normal file
View File

@ -0,0 +1,22 @@
# import
# ./dht/[test_providers, test_providermngr],
# ./discv5/[test_discoveryv5, test_discoveryv5_encoding]
import osproc
var cmds: seq[string]
when defined(testsPart1) or defined(testsAll):
cmds.add [
"nim c -r --hints:off --verbosity:0 dht/test_providers.nim",
"nim c -r --hints:off --verbosity:0 dht/test_providermngr.nim",
]
when defined(testsPart2) or defined(testsAll):
cmds.add [
"nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5.nim",
"nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim",
]
echo "Running Test Commands: ", cmds
quit execProcesses(cmds)

View File

@ -1,2 +0,0 @@
deps=""
resolver="MaxVer"