Compare commits

..

No commits in common. "master" and "v0.2.1" have entirely different histories.

62 changed files with 1825 additions and 1360 deletions


@ -0,0 +1,42 @@
name: Install Nimble
description: Installs a prebuilt Nimble release
inputs:
nimble_version:
description: "Nimble version (release tag) to install"
# TODO: make sure to change to tagged release when available
default: "latest"
os:
description: "operating system"
default: "linux"
cpu:
description: "cpu architecture"
default: "amd64"
runs:
using: "composite"
steps:
- uses: actions/checkout@v3
- name: Build Nimble
shell: bash
run: |
set -x
mkdir -p .nimble
cd .nimble
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
CPU=x64
elif [[ '${{ inputs.cpu }}' == 'i386' ]]; then
CPU=x32
else
CPU=${{ inputs.cpu }}
fi
if [[ '${{ inputs.os }}' == 'macos' ]]; then
OS=apple
else
OS='${{ inputs.os }}'
fi
URL=https://github.com/nim-lang/nimble/releases/download/${{ inputs.nimble_version }}/nimble-"$OS"_"$CPU".tar.gz
curl -o nimble.tar.gz -L -s -S "$URL"
tar -xvf nimble.tar.gz
- name: Add Nimble to PATH
shell: bash
run: echo '${{ github.workspace }}/.nimble/' >> $GITHUB_PATH
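
For reference, a job elsewhere in this diff consumes this composite action as a local step; a minimal invocation sketch (the explicit `nimble_version` is illustrative, since it defaults to `latest`):

```yaml
# Hypothetical step in a workflow job using the composite action above.
- name: Setup Nimble
  uses: ./.github/actions/install_nimble
  with:
    nimble_version: latest          # optional; defaults to "latest"
    os: ${{ matrix.target.os }}     # e.g. linux, macos, windows
    cpu: ${{ matrix.target.cpu }}   # e.g. amd64, i386
```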

.github/workflows/ci-nimbus.yml vendored Normal file (173 changed lines)

@ -0,0 +1,173 @@
name: CI-nimbus
on:
push:
paths:
- atlas.lock
- .github/workflows/ci-nimbus.yml
jobs:
build:
timeout-minutes: 90
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
# - os: linux
# cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 1.6.14
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Restore Nim toolchain binaries from cache
id: nim-cache
uses: actions/cache@v3
with:
path: NimBinaries
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-nim-${{ hashFiles('atlas.lock') }}
- name: Restore Vendor Clones from cache
id: vendor-cache
uses: actions/cache@v3
with:
path: vendor/*/
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-vendor-${{ hashFiles('atlas.lock') }}
- name: Run tests
run: |
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
# https://github.com/status-im/nimbus-eth2/issues/3121
export NIMFLAGS="-d:nimRawSetjmp"
fi
echo "BUILD: "
export NIM_COMMIT=${{ matrix.branch }}
make -j${ncpu} CI_CACHE=NimBinaries ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1
make test -j${ncpu}


@ -1,22 +1,170 @@
name: CI
on: [push, pull_request]
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
jobs:
test:
runs-on: ${{ matrix.os }}
build:
timeout-minutes: 90
strategy:
fail-fast: false
matrix:
nim: [2.2.4]
os: [ubuntu-latest, macos-latest, windows-latest]
target:
- os: linux
cpu: amd64
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'version-1-6' || matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: jiro4989/setup-nim-action@v2
with:
nim-version: ${{matrix.nim}}
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build
run: nimble install -y
- name: Test
run: nimble test -y
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Install build dependencies (Linux i386)
if: runner.os == 'Linux' && matrix.target.cpu == 'i386'
run: |
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo DEBIAN_FRONTEND='noninteractive' apt-get install \
--no-install-recommends -yq gcc-multilib g++-multilib \
libssl-dev:i386
mkdir -p external/bin
cat << EOF > external/bin/gcc
#!/bin/bash
exec $(which gcc) -m32 "\$@"
EOF
cat << EOF > external/bin/g++
#!/bin/bash
exec $(which g++) -m32 "\$@"
EOF
chmod 755 external/bin/gcc external/bin/g++
echo '${{ github.workspace }}/external/bin' >> $GITHUB_PATH
- name: MSYS2 (Windows i386)
if: runner.os == 'Windows' && matrix.target.cpu == 'i386'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
msystem: MINGW32
install: >-
base-devel
git
mingw-w64-i686-toolchain
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
uses: msys2/setup-msys2@v2
with:
path-type: inherit
install: >-
base-devel
git
mingw-w64-x86_64-toolchain
- name: Restore Nim DLLs dependencies (Windows) from cache
if: runner.os == 'Windows'
id: windows-dlls-cache
uses: actions/cache@v2
with:
path: external/dlls
key: 'dlls'
- name: Install DLL dependencies (Windows)
if: >
steps.windows-dlls-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
run: |
mkdir external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x external/windeps.zip -oexternal/dlls
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
run: |
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Derive environment variables
run: |
if [[ '${{ matrix.target.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
else
PLATFORM=x86
fi
echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
ncpu=
MAKE_CMD="make"
case '${{ runner.os }}' in
'Linux')
ncpu=$(nproc)
;;
'macOS')
ncpu=$(sysctl -n hw.ncpu)
;;
'Windows')
ncpu=$NUMBER_OF_PROCESSORS
MAKE_CMD="mingw32-make"
;;
esac
[[ -z "$ncpu" || $ncpu -le 0 ]] && ncpu=1
echo "ncpu=$ncpu" >> $GITHUB_ENV
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
- name: Restore nimble dependencies from cache
id: nimble_deps
uses: actions/cache@v3
with:
path: |
~/.nimble/pkgs2
~/.nimble/packages_official.json
key: ${{ matrix.target.os }}-${{ matrix.target.cpu }}-nimble-${{ hashFiles('nimble.lock') }}
- name: Setup Nimble
uses: "./.github/actions/install_nimble"
with:
os: ${{ matrix.target.os }}
cpu: ${{ matrix.target.cpu }}
- name: Run tests
run: |
rm -rf ~/.nimble/
if [[ "${{ matrix.target.os }}" == "windows" ]]; then
# https://github.com/status-im/nimbus-eth2/issues/3121
export NIMFLAGS="-d:nimRawSetjmp"
fi
nimble test -y
if [[ "${{ matrix.branch }}" == "version-1-6" || "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
export NIMFLAGS="${NIMFLAGS} --gc:orc"
nimble test -y
fi;

.gitignore vendored (6 changed lines)

@ -1,6 +1,3 @@
*
!*.*
!*/
coverage
nimcache
tests/testAll
@ -11,6 +8,3 @@ nimbus-build-system.paths
vendor/*
NimBinaries
.update.timestamp
*.dSYM
.vscode/*
nimbledeps

Makefile Normal file (71 changed lines)

@ -0,0 +1,71 @@
# Copyright (c) 2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
SHELL := bash # the shell used internally by Make
# used inside the included makefiles
BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics
# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker
DOCKER_IMAGE_NIM_PARAMS ?= -d:chronicles_colors:none -d:insecure
LINK_PCRE := 0
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
.PHONY: \
all \
clean \
coverage \
deps \
libbacktrace \
test \
update
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
GIT_SUBMODULE_UPDATE := nimble install https://github.com/elcritch/atlas && atlas rep --noexec atlas.lock
.DEFAULT:
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
$(GIT_SUBMODULE_UPDATE); \
echo
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
#
# After restarting, it will execute its original goal, so we don't have to start a child Make here
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
else # "variables.mk" was included. Business as usual until the end of this file.
# default target, because it's the first one that doesn't start with '.'
# Builds dependencies and runs the test task
all: | build deps
echo -e $(BUILD_MSG) "$@" && \
$(ENV_SCRIPT) nim test $(NIM_PARAMS)
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
deps: | deps-common nat-libs
#- deletes and recreates "codexdht.nims" which on Windows is a copy instead of a proper symlink
update: | update-common
rm -rf codexdht.nims && \
$(MAKE) codexdht.nims $(HANDLE_OUTPUT)
# Builds and runs part of the test suite
test: | build deps
echo -e $(BUILD_MSG) "$@" && \
$(ENV_SCRIPT) nim testAll $(NIM_PARAMS) config.nims
# usual cleaning
clean: | clean-common
endif # "variables.mk" was not included
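
A short usage sketch of these targets, assuming the vendored nimbus-build-system behaves as the comments above describe (the first invocation bootstraps dependencies through the `.DEFAULT` rule):

```sh
# First run: installs atlas, replays atlas.lock, then Make restarts and
# runs the requested goal.
make -j4 test
# After changing dependencies, refresh the vendored checkouts
# (also recreates codexdht.nims, as noted above):
make update
```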


@ -1,12 +1,12 @@
# A DHT implementation for Logos Storage
# A DHT implementation for Codex
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Stability: experimental](https://img.shields.io/badge/stability-experimental-orange.svg)](#stability)
[![CI (GitHub Actions)](https://github.com/logos-storage/logos-storage-nim-dht/workflows/CI/badge.svg?branch=master)](https://github.com/logos-storage/logos-storage-nim-dht/actions/workflows/ci.yml?query=workflow%3ACI+branch%3Amaster)
[![codecov](https://codecov.io/gh/logos-storage/logos-storage-nim-dht/branch/master/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/logos-storage/logos-storage-nim-dht)
[![CI (GitHub Actions)](https://github.com/status-im/nim-libp2p-dht/workflows/CI/badge.svg?branch=main)](https://github.com/status-im/nim-libp2p-dht/actions?query=workflow%3ACI+branch%3Amain)
[![codecov](https://codecov.io/gh/status-im/nim-libp2p-dht/branch/main/graph/badge.svg?token=tlmMJgU4l7)](https://codecov.io/gh/status-im/nim-libp2p-dht)
This DHT implementation aims to provide a DHT for Logos Storage with the following properties:
This DHT implementation aims to provide a DHT for Codex with the following properties:
* flexible secure transport usage with
* fast UDP based operation
* eventual fallback to TCP-based operation (maybe through libp2p)
@ -19,26 +19,3 @@ This DHT implementation is aiming to provide a DHT for Logos Storage with the fo
The current implementation is based on nim-eth's Discovery v5 implementation.
Base files were copied from [`status-im/nim-eth@779d767b024175a51cf74c79ec7513301ebe2f46`](https://github.com/status-im/nim-eth/commit/779d767b024175a51cf74c79ec7513301ebe2f46)
## Building
This repo is set up to use Nimble lockfiles. This requires Nimble 0.14+, which wasn't installed by default at the time of writing. If `nimble -v` reports `0.13.x`, you will need to install Nimble 0.14. Note that Nimble 0.14 changes how Nimble behaves!
Nimble 0.14 can be installed with:
```sh
nimble install nimble@0.14.2
```
After this you can set up your Nimble environment. Note that this will build the pinned version of Nim! The first run can take ~15 minutes.
```sh
nimble setup # creates a nimble.paths used for rest of Nimble commands
nimble testAll
```
You can also run tasks directly:
```sh
nim testAll
```
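
Putting the steps above together, a typical first-time bootstrap might look like this (illustrative; it assumes the Nimble 0.14 install succeeded):

```sh
nimble -v        # should now report 0.14.x
nimble setup     # builds the pinned Nim and writes nimble.paths (slow on the first run)
nimble testAll   # run the full test suite
```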

atlas.lock Normal file (143 changed lines)

@ -0,0 +1,143 @@
{
"items": {
"nimbus-build-system": {
"dir": "vendor/nimbus-build-system",
"url": "https://github.com/status-im/nimbus-build-system",
"commit": "239c3a7fbb88fd241da0ade3246fd2e5fcff4f25"
},
"nim-nat-traversal": {
"dir": "vendor/nim-nat-traversal",
"url": "https://github.com/status-im/nim-nat-traversal",
"commit": "802d75edcc656e616120fb27f950ff1285ddcbba"
},
"nim-zlib": {
"dir": "vendor/nim-zlib",
"url": "https://github.com/status-im/nim-zlib",
"commit": "f34ca261efd90f118dc1647beefd2f7a69b05d93"
},
"nim-stew": {
"dir": "vendor/nim-stew",
"url": "https://github.com/status-im/nim-stew.git",
"commit": "e18f5a62af2ade7a1fd1d39635d4e04d944def08"
},
"nim-http-utils": {
"dir": "vendor/nim-http-utils",
"url": "https://github.com/status-im/nim-http-utils.git",
"commit": "3b491a40c60aad9e8d3407443f46f62511e63b18"
},
"nim-chronos": {
"dir": "vendor/nim-chronos",
"url": "https://github.com/status-im/nim-chronos.git",
"commit": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686"
},
"upraises": {
"dir": "vendor/upraises",
"url": "https://github.com/markspanbroek/upraises.git",
"commit": "bc2628989b63854d980e92dadbd58f83e34b6f25"
},
"nim-sqlite3-abi": {
"dir": "vendor/nim-sqlite3-abi",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi.git",
"commit": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3"
},
"questionable": {
"dir": "vendor/questionable",
"url": "https://github.com/status-im/questionable.git",
"commit": "0d7ce8efdedaf184680cb7268721fca0af947a74"
},
"nim-websock": {
"dir": "vendor/nim-websock",
"url": "https://github.com/status-im/nim-websock.git",
"commit": "2c3ae3137f3c9cb48134285bd4a47186fa51f0e8"
},
"nim-secp256k1": {
"dir": "vendor/nim-secp256k1",
"url": "https://github.com/status-im/nim-secp256k1.git",
"commit": "5340cf188168d6afcafc8023770d880f067c0b2f"
},
"nim-bearssl": {
"dir": "vendor/nim-bearssl",
"url": "https://github.com/status-im/nim-bearssl.git",
"commit": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab"
},
"dnsclient.nim": {
"dir": "vendor/dnsclient.nim",
"url": "https://github.com/ba0f3/dnsclient.nim",
"commit": "23214235d4784d24aceed99bbfe153379ea557c8"
},
"nimcrypto": {
"dir": "vendor/nimcrypto",
"url": "https://github.com/status-im/nimcrypto.git",
"commit": "a5742a9a214ac33f91615f3862c7b099aec43b00"
},
"nim-json-serialization": {
"dir": "vendor/nim-json-serialization",
"url": "https://github.com/status-im/nim-json-serialization.git",
"commit": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4"
},
"nim-testutils": {
"dir": "vendor/nim-testutils",
"url": "https://github.com/status-im/nim-testutils",
"commit": "b56a5953e37fc5117bd6ea6dfa18418c5e112815"
},
"nim-unittest2": {
"dir": "vendor/nim-unittest2",
"url": "https://github.com/status-im/nim-unittest2.git",
"commit": "b178f47527074964f76c395ad0dfc81cf118f379"
},
"npeg": {
"dir": "vendor/npeg",
"url": "https://github.com/zevv/npeg",
"commit": "b15a10e388b91b898c581dbbcb6a718d46b27d2f"
},
"nim-serialization": {
"dir": "vendor/nim-serialization",
"url": "https://github.com/status-im/nim-serialization.git",
"commit": "493d18b8292fc03aa4f835fd825dea1183f97466"
},
"nim-faststreams": {
"dir": "vendor/nim-faststreams",
"url": "https://github.com/status-im/nim-faststreams.git",
"commit": "1b561a9e71b6bdad1c1cdff753418906037e9d09"
},
"nim-datastore": {
"dir": "vendor/nim-datastore",
"url": "https://github.com/codex-storage/nim-datastore.git",
"commit": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa"
},
"asynctest": {
"dir": "vendor/asynctest",
"url": "https://github.com/markspanbroek/asynctest",
"commit": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7"
},
"nim-stint": {
"dir": "vendor/nim-stint",
"url": "https://github.com/status-im/nim-stint.git",
"commit": "036c71d06a6b22f8f967ba9d54afd2189c3872ca"
},
"nim-metrics": {
"dir": "vendor/nim-metrics",
"url": "https://github.com/status-im/nim-metrics.git",
"commit": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5"
},
"nim-libp2p": {
"dir": "vendor/nim-libp2p",
"url": "https://github.com/status-im/nim-libp2p.git",
"commit": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702"
},
"nim-chronicles": {
"dir": "vendor/nim-chronicles",
"url": "https://github.com/status-im/nim-chronicles.git",
"commit": "7631f7b2ee03398cb1512a79923264e8f9410af6"
},
"nim-protobuf-serialization": {
"dir": "vendor/nim-protobuf-serialization",
"url": "https://github.com/status-im/nim-protobuf-serialization",
"commit": "28214b3e40c755a9886d2ec8f261ec48fbb6bec6"
}
},
"nimcfg": "############# begin Atlas config section ##########\n--noNimblePath\n--path:\"vendor/nim-secp256k1\"\n--path:\"vendor/nim-protobuf-serialization\"\n--path:\"vendor/nimcrypto\"\n--path:\"vendor/nim-bearssl\"\n--path:\"vendor/nim-chronicles\"\n--path:\"vendor/nim-chronos\"\n--path:\"vendor/nim-libp2p\"\n--path:\"vendor/nim-metrics\"\n--path:\"vendor/nim-stew\"\n--path:\"vendor/nim-stint\"\n--path:\"vendor/asynctest\"\n--path:\"vendor/nim-datastore\"\n--path:\"vendor/questionable\"\n--path:\"vendor/nim-faststreams\"\n--path:\"vendor/nim-serialization\"\n--path:\"vendor/npeg/src\"\n--path:\"vendor/nim-unittest2\"\n--path:\"vendor/nim-testutils\"\n--path:\"vendor/nim-json-serialization\"\n--path:\"vendor/nim-http-utils\"\n--path:\"vendor/dnsclient.nim/src\"\n--path:\"vendor/nim-websock\"\n--path:\"vendor/nim-sqlite3-abi\"\n--path:\"vendor/upraises\"\n--path:\"vendor/nim-zlib\"\n############# end Atlas config section ##########\n",
"nimVersion": "1.6.14",
"gccVersion": "",
"clangVersion": ""
}
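
The Makefile earlier in this diff bootstraps dependencies from this lockfile via atlas; a minimal manual replay, mirroring that bootstrap command (shown only as a sketch), would be:

```sh
# Install atlas, then materialise the pinned commits under vendor/
nimble install https://github.com/elcritch/atlas
atlas rep --noexec atlas.lock
```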

codecov.yml Normal file (22 changed lines)

@ -0,0 +1,22 @@
coverage:
status:
project:
default:
# advanced settings
# Prevents the PR from being blocked by a reduction in coverage.
# Note: if we want to re-enable this, a `threshold` value can be used to
# allow coverage to drop by x% while still posting a success status.
# `informational`: https://docs.codecov.com/docs/commit-status#informational
# `threshold`: https://docs.codecov.com/docs/commit-status#threshold
informational: true
patch:
default:
# advanced settings
# Prevents the PR from being blocked by a reduction in coverage.
# Note: if we want to re-enable this, a `threshold` value can be used to
# allow coverage to drop by x% while still posting a success status.
# `informational`: https://docs.codecov.com/docs/commit-status#informational
# `threshold`: https://docs.codecov.com/docs/commit-status#threshold
informational: true


@ -1,5 +1,5 @@
import
./codexdht/dht,
./codexdht/discv5
./libp2pdht/dht,
./libp2pdht/discv5
export dht, discv5


@ -1,42 +1,65 @@
# Package
version = "0.6.0"
version = "0.2.1"
author = "Status Research & Development GmbH"
description = "DHT based on Eth discv5 implementation"
description = "DHT based on the libp2p Kademlia spec"
license = "MIT"
skipDirs = @["tests"]
# Dependencies
requires "nim >= 2.2.4 & < 3.0.0"
requires "secp256k1 >= 0.6.0 & < 0.7.0"
requires "nimcrypto >= 0.6.2 & < 0.8.0"
requires "bearssl >= 0.2.5 & < 0.3.0"
requires "chronicles >= 0.11.2 & < 0.13.0"
requires "chronos >= 4.0.4 & < 4.1.0"
requires "libp2p >= 1.14.1 & < 2.0.0"
requires "metrics >= 0.1.0 & < 0.2.0"
requires "stew >= 0.4.2"
requires "stint >= 0.8.1 & < 0.9.0"
requires "https://github.com/logos-storage/nim-datastore >= 0.2.1 & < 0.3.0"
requires "questionable >= 0.10.15 & < 0.11.0"
requires "leveldbstatic >= 0.2.1 & < 0.3.0"
requires "nim >= 1.2.0"
requires "secp256k1#b3f38e2795e805743b299dc5d96d332db375b520" # >= 0.5.2 & < 0.6.0
requires "protobuf_serialization#27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a" # >= 0.2.0 & < 0.3.0
requires "nimcrypto == 0.5.4"
requires "bearssl#head"
requires "chronicles >= 0.10.2 & < 0.11.0"
requires "chronos#1394c9e04957928afc1db33d2e0965cfb677a1e0" # >= 3.0.11 & < 3.1.0
requires "libp2p#unstable"
requires "metrics"
requires "stew#head"
requires "stint"
requires "asynctest >= 0.3.1 & < 0.4.0"
requires "https://github.com/status-im/nim-datastore#head"
requires "questionable"
task testAll, "Run all test suites":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testAll"
task testAll, "Run DHT tests":
exec "nim c -r tests/testAll.nim"
task test, "Run the test suite":
exec "nimble install -d -y"
withDir "tests":
exec "nimble test"
# task coverage, "generates code coverage report":
# var (output, exitCode) = gorgeEx("which lcov")
# if exitCode != 0:
# echo ""
# echo " ************************** ⛔️ ERROR ⛔️ **************************"
# echo " ** **"
# echo " ** ERROR: lcov not found, it must be installed to run code **"
# echo " ** coverage locally **"
# echo " ** **"
# echo " *****************************************************************"
# echo ""
# quit 1
task testPart1, "Run the test suite part 1":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testPart1"
# (output, exitCode) = gorgeEx("gcov --version")
# if output.contains("Apple LLVM"):
# echo ""
# echo " ************************* ⚠️ WARNING ⚠️ *************************"
# echo " ** **"
# echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
# echo " ** emulates an old version of gcov (4.2.0) and therefore **"
# echo " ** coverage results will differ than those on CI (which **"
# echo " ** uses a much newer version of gcov). **"
# echo " ** **"
# echo " *****************************************************************"
# echo ""
# exec("nimble --verbose test --opt:speed -d:debug --verbosity:0 --hints:off --lineDir:on -d:chronicles_log_level=INFO --nimcache:nimcache --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage")
# exec("cd nimcache; rm *.c; cd ..")
# mkDir("coverage")
# exec("lcov --capture --directory nimcache --output-file coverage/coverage.info")
# exec("$(which bash) -c 'shopt -s globstar; ls $(pwd)/libp2pdht/{*,**/*}.nim'")
# exec("$(which bash) -c 'shopt -s globstar; lcov --extract coverage/coverage.info $(pwd)/libp2pdht/{*,**/*}.nim --output-file coverage/coverage.f.info'")
# echo "Generating HTML coverage report"
# exec("genhtml coverage/coverage.f.info --output-directory coverage/report")
# echo "Opening HTML coverage report in browser..."
# exec("open coverage/report/index.html")
task testPart2, "Run the test suite part 2":
exec "nimble install -d -y"
withDir "tests":
exec "nimble testPart2"


@ -1,104 +0,0 @@
import
std/sugar,
libp2p/crypto/[crypto, secp],
stew/[byteutils, objects, ptrops],
results
import secp256k1
const
KeyLength* = secp256k1.SkEcdhSecretSize
## Ecdh shared secret key length without leading byte
## (publicKey * privateKey).x, where length of x is 32 bytes
FullKeyLength* = KeyLength + 1
## Ecdh shared secret with leading byte 0x02 or 0x03
type
SharedSecret* = object
## Representation of ECDH shared secret, without leading `y` byte
data*: array[KeyLength, byte]
SharedSecretFull* = object
## Representation of ECDH shared secret, with leading `y` byte
## (`y` is 0x02 when (publicKey * privateKey).y is even or 0x03 when odd)
data*: array[FullKeyLength, byte]
proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
let skKey = ? secp.SkPrivateKey.init(data).mapErr(e =>
("Failed to init private key from hex string: " & $e).cstring)
ok PrivateKey.init(skKey)
proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
let skKey = ? secp.SkPublicKey.init(data).mapErr(e =>
("Failed to init public key from hex string: " & $e).cstring)
ok PublicKey.init(skKey)
proc ecdhSharedSecretHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
{.cdecl, raises: [].} =
## Hash function used by `ecdhSharedSecret` below
##
## `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
## Both `x32` and `y32` are 32 bytes in length.
##
## Take the `x32` part as ecdh shared secret.
## output length is derived from x32 length and taken from ecdh
## generic parameter `KeyLength`
copyMem(output, x32, KeyLength)
return 1
func ecdhSharedSecret(seckey: SkPrivateKey, pubkey: secp.SkPublicKey): SharedSecret =
## Compute ecdh agreed shared secret.
let res = secp256k1.ecdh[KeyLength](
secp256k1.SkSecretKey(seckey),
secp256k1.SkPublicKey(pubkey),
ecdhSharedSecretHash,
nil,
)
# This function only fails if the hash function returns zero.
# Because our hash function always succeeds, we can turn the error into a defect
doAssert res.isOk, $res.error
SharedSecret(data: res.get)
proc toRaw*(pubkey: PublicKey): seq[byte] =
secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]
proc ecdhSharedSecretFullHash(output: ptr byte, x32, y32: ptr byte, data: pointer): cint
{.cdecl, raises: [].} =
## Hash function used by `ecdhSharedSecretFull` below
# `x32` and `y32` are result of scalar multiplication of publicKey * privateKey.
# Leading byte is 0x02 if `y32` is even and 0x03 if odd. Then concat with `x32`.
# output length is derived from `x32` length + 1 and taken from ecdh
# generic parameter `FullKeyLength`
# output[0] = 0x02 | (y32[31] & 1)
output[] = 0x02 or (y32.offset(31)[] and 0x01)
copyMem(output.offset(1), x32, KeyLength)
return 1
func ecdhSharedSecretFull*(seckey: PrivateKey, pubkey: PublicKey): SharedSecretFull =
## Compute ecdh agreed shared secret with leading byte.
##
let res = ecdh[FullKeyLength](secp256k1.SkSecretKey(seckey.skkey),
secp256k1.SkPublicKey(pubkey.skkey),
ecdhSharedSecretFullHash, nil)
# This function only fails if the hash function returns zero.
# Because our hash function always succeeds, we can turn the error into a defect
doAssert res.isOk, $res.error
SharedSecretFull(data: res.get)
proc ecdhRaw*(
priv: PrivateKey,
pub: PublicKey
): Result[SharedSecretFull, cstring] =
## emulate old ecdhRaw style keys
##
## this includes a leading 0x02 or 0x03
##
# TODO: Do we need to support non-secp256k1 schemes?
if priv.scheme != Secp256k1 or pub.scheme != Secp256k1:
return err "Must use secp256k1 scheme".cstring
ok ecdhSharedSecretFull(priv, pub)


@ -1,88 +0,0 @@
# logos-storage-dht - Logos Storage DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
#
## Session cache as mentioned at
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
##
## A session stores encryption and decryption keys for P2P encryption.
## Since key exchange can be started both ways, and these might not get finalised with
## UDP transport, we can't be sure what encryption key will be used by the other side:
## - the one derived in the key-exchange started by us,
## - the one derived in the key-exchange started by the other node.
## To alleviate this issue, we store two decryption keys in each session.
{.push raises: [].}
import
std/[net, options],
stint, stew/endians2,
node, lru
export lru
const
aesKeySize* = 128 div 8
keySize = sizeof(NodeId) +
16 + # max size of ip address (ipv6)
2 # Sizeof port
type
AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte]
SessionValue* = array[3 * sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue]
func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0
result[pos ..< pos+sizeof(id)] = toBytesBE(id)
pos.inc(sizeof(id))
case address.ip.family
of IpAddressFamily.IpV4:
result[pos ..< pos+sizeof(address.ip.address_v4)] = address.ip.address_v4
of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytesBE(address.port.uint16)
func swapr*(s: var Sessions, id: NodeId, address: Address) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(AesKey))
copyMem(addr value[16], unsafeAddr val[0], sizeof(AesKey))
copyMem(addr value[32], unsafeAddr val[32], sizeof(AesKey))
s.put(key, value)
func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[3 * sizeof(AesKey), byte]
let
key = makeKey(id, address)
entry = s.get(key)
if entry.isSome():
let val = entry.get()
copyMem(addr value[0], unsafeAddr val[16], sizeof(r))
value[16 .. 31] = r
value[32 .. ^1] = w
s.put(key, value)
func load*(s: var Sessions, id: NodeId, address: Address, r1, r2, w: var AesKey): bool =
let res = s.get(makeKey(id, address))
if res.isSome():
let val = res.get()
copyMem(addr r1[0], unsafeAddr val[0], sizeof(r1))
copyMem(addr r2[0], unsafeAddr val[sizeof(r1)], sizeof(r2))
copyMem(addr w[0], unsafeAddr val[sizeof(r1) + sizeof(r2)], sizeof(w))
return true
else:
return false
func del*(s: var Sessions, id: NodeId, address: Address) =
s.del(makeKey(id, address))


@ -1,299 +0,0 @@
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Everything below the handling of ordinary messages
import
std/[net, tables, options, sets],
bearssl/rand,
chronos,
chronicles,
metrics,
libp2p/crypto/crypto,
"."/[node, encoding, sessions]
const
handshakeTimeout* = 500.milliseconds ## timeout for the reply on the
## whoareyou message
responseTimeout* = 1.seconds ## timeout for the response of a request-response
## call
logScope:
topics = "discv5 transport"
declarePublicCounter dht_transport_tx_packets,
"Discovery transport packets sent", labels = ["state"]
declarePublicCounter dht_transport_tx_bytes,
"Discovery transport bytes sent", labels = ["state"]
declarePublicCounter dht_transport_rx_packets,
"Discovery transport packets received", labels = ["state"]
declarePublicCounter dht_transport_rx_bytes,
"Discovery transport bytes received", labels = ["state"]
type
Transport* [Client] = ref object
client: Client
bindAddress: Address ## UDP binding address
transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, (PendingRequest, Moment)]
keyexchangeInProgress: HashSet[NodeId]
pendingRequestsByNode: Table[NodeId, seq[seq[byte]]]
codec*: Codec
rng: ref HmacDrbgContext
PendingRequest = object
node: Node
message: seq[byte]
proc sendToA(t: Transport, a: Address, msg: seq[byte]) =
trace "Send packet", myport = t.bindAddress.port, address = a
let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, msg)
f.addCallback(
proc(data: pointer) =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to our own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
dht_transport_tx_packets.inc(labelValues = ["failed"])
dht_transport_tx_bytes.inc(msg.len.int64, labelValues = ["failed"])
)
dht_transport_tx_packets.inc()
dht_transport_tx_bytes.inc(msg.len.int64)
proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome())
t.sendToA(n.address.get(), data)
proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message)
t.sendToA(toAddr, data)
# TODO: This could be improved to do the clean-up immediately in case a non
# whoareyou response does arrive, but we would need to store the AuthTag
# somewhere
proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, (request, Moment.now())):
sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce)
## TODO: remove the dependence on message. This should be handled higher up.
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message)
if haskey:
trace "Send message: has key", myport = t.bindAddress.port , dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
else:
# we don't have an encryption key for this target, so we should initiate keyexchange
if not (toNode.id in t.keyexchangeInProgress):
trace "Send message: send random to trigger Whoareyou", myport = t.bindAddress.port , dstId = toNode
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.keyexchangeInProgress.incl(toNode.id)
trace "keyexchangeInProgress added", myport = t.bindAddress.port , dstId = toNode
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (timeout)", myport = t.bindAddress.port , dstId = toNode
else:
# delay sending this message until whoareyou is received and handshake is sent
# have to reencode once keys are clear
t.pendingRequestsByNode.mgetOrPut(toNode.id, newSeq[seq[byte]]()).add(message)
trace "Send message: Node with this id already has ongoing keyexchage, delaying packet",
myport = t.bindAddress.port , dstId = toNode, qlen=t.pendingRequestsByNode[toNode.id].len
proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
requestNonce: AESGCMNonce, node: Option[Node]) =
let key = HandshakeKey(nodeId: toId, address: a)
if not t.codec.hasHandshake(key):
let
recordSeq = if node.isSome(): node.get().record.seqNum
else: 0
pubkey = if node.isSome(): some(node.get().pubkey)
else: none(PublicKey)
let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
recordSeq, pubkey)
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
# handshake key is popped in decodeHandshakePacket. if not yet popped by timeout:
if t.codec.hasHandshake(key):
debug "Handshake timeout", myport = t.bindAddress.port , dstId = toId, address = a
t.codec.handshakes.del(key)
trace "Send whoareyou", dstId = toId, address = a
t.sendToA(a, data)
else:
# TODO: is this reasonable to drop it? Should we allow a mini-queue here?
# Queue should be on sender side, as this is random encoded!
debug "Node with this id already has ongoing handshake, queuing packet", myport = t.bindAddress.port , dstId = toId, address = a
proc sendPending(t:Transport, toNode: Node):
Future[void] {.async.} =
if t.pendingRequestsByNode.hasKey(toNode.id):
trace "Found pending request", myport = t.bindAddress.port, src = toNode, len = t.pendingRequestsByNode[toNode.id].len
for message in t.pendingRequestsByNode[toNode.id]:
trace "Sending pending packet", myport = t.bindAddress.port, dstId = toNode.id
let address = toNode.address.get()
let (data, nonce, haskey) = encodeMessagePacket(t.rng[], t.codec, toNode.id, address, message)
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
t.pendingRequestsByNode.del(toNode.id)
proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
dht_transport_rx_packets.inc()
dht_transport_rx_bytes.inc(packet.len.int64)
let decoded = t.codec.decodePacket(a, packet)
if decoded.isOk:
let packet = decoded[]
case packet.flag
of OrdinaryMessage:
if packet.messageOpt.isSome():
let message = packet.messageOpt.get()
trace "Received message packet", myport = t.bindAddress.port, srcId = packet.srcId, address = a,
kind = message.kind, p = $packet
t.client.handleMessage(packet.srcId, a, message)
else:
trace "Not decryptable message packet received", myport = t.bindAddress.port,
srcId = packet.srcId, address = a
# If we already have a keyexchange in progress, we have a case of simultaneous cross-connect.
# We could try to decide here which should go on, but since we are on top of UDP, a more robust
# choice is to answer here and resolve conflicts in the next stage (reception of Whoareyou), or
# even later (reception of Handshake).
if packet.srcId in t.keyexchangeInProgress:
trace "cross-connect detected, still sending Whoareyou"
t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
t.client.getNode(packet.srcId))
of Flag.Whoareyou:
trace "Received whoareyou packet", myport = t.bindAddress.port, address = a
var
prt: (PendingRequest, Moment)
if t.pendingRequests.take(packet.whoareyou.requestNonce, prt):
let
pr = prt[0]
startTime = prt[1]
toNode = pr.node
rtt = Moment.now() - startTime
# trace "whoareyou RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
# This is a node we previously contacted and thus must have an address.
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let data = encodeHandshakePacket(
t.rng[],
t.codec,
toNode.id,
address,
pr.message,
packet.whoareyou,
toNode.pubkey
).expect("Valid handshake packet to encode")
trace "Send handshake message packet", myport = t.bindAddress.port, dstId = toNode.id, address
t.send(toNode, data)
# keyexchange ready, we can send queued packets
t.keyexchangeInProgress.excl(toNode.id)
trace "keyexchangeInProgress removed (finished)", myport = t.bindAddress.port, dstId = toNode.id, address
discard t.sendPending(toNode)
else:
debug "Timed out or unrequested whoareyou packet", address = a
of HandshakeMessage:
trace "Received handshake message packet", myport = t.bindAddress.port, srcId = packet.srcIdHs,
address = a, kind = packet.message.kind
t.client.handleMessage(packet.srcIdHs, a, packet.message)
# For a handshake message it is possible that we received a newer SPR.
# In that case we can add/update it to the routing table.
if packet.node.isSome():
let node = packet.node.get()
# Let's not add nodes without a correct IP in the SPR to the routing table.
# The SPR could contain bogus IPs and although they would get removed
# on the next revalidation, one could spam these as the handshake
# message occurs on (first) incoming messages.
if node.address.isSome() and a == node.address.get():
# TODO: maybe here we could verify that the address matches what we were
# sending the 'whoareyou' message to. In that case, we can set 'seen'
# TODO: verify how this works with restrictive NAT and firewall scenarios.
node.registerSeen()
if t.client.addNode(node):
trace "Added new node to routing table after handshake", node, tablesize=t.client.nodesDiscovered()
discard t.sendPending(node)
else:
trace "address mismatch, not adding seen flag", node, address = a, nodeAddress = node.address.get()
else:
dht_transport_rx_packets.inc(labelValues = ["failed_decode"])
dht_transport_rx_bytes.inc(packet.len.int64, labelValues = ["failed_decode"])
trace "Packet decoding error", myport = t.bindAddress.port, error = decoded.error, address = a
proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
Future[void] {.async.} =
let t = getUserData[Transport[T]](transp)
# TODO: should we use `peekMessage()` to avoid allocation?
let buf = try:
transp.getMessage()
except TransportOsError as e:
# This is likely to be local network connection issues.
warn "Transport getMessage", exception = e.name, msg = e.msg
return
let ip = try: raddr.address()
except ValueError as e:
error "Not a valid IpAddress", exception = e.name, msg = e.msg
return
let a = Address(ip: ip, port: raddr.port)
t.receive(a, buf)
proc open*[T](t: Transport[T]) {.raises: [Defect, CatchableError].} =
info "Starting transport", bindAddress = t.bindAddress
# TODO allow binding to specific IP / IPv6 / etc
let ta = initTAddress(t.bindAddress.ip, t.bindAddress.port)
t.transp = newDatagramTransport(processClient[T], udata = t, local = ta)
proc close*(t: Transport) =
t.transp.close
proc closed*(t: Transport) : bool =
t.transp.closed
proc closeWait*(t: Transport) {.async.} =
await t.transp.closeWait
proc newTransport*[T](
client: T,
privKey: PrivateKey,
localNode: Node,
bindPort: Port,
bindIp = IPv4_any(),
rng = newRng()): Transport[T]=
# TODO Consider whether this should be a Defect
doAssert rng != nil, "RNG initialization failed"
Transport[T](
client: client,
bindAddress: Address(ip: bindIp, port: bindPort),
codec: Codec(
localNode: localNode,
privKey: privKey,
sessions: Sessions.init(256)),
rng: rng)


@ -1,6 +1,25 @@
import std/os
const currentDir = currentSourcePath()[0 .. ^(len("config.nims") + 1)]
switch("define", "libp2p_pki_schemes=secp256k1")
# begin Nimble config (version 2)
when withDir(thisDir(), system.fileExists("nimble.paths")):
task testAll, "Run DHT tests":
exec "nim c -r tests/testAll.nim"
task test, "Run DHT tests":
testAllTask()
when getEnv("NIMBUS_BUILD_SYSTEM") == "yes" and
# BEWARE
# In Nim 1.6, config files are evaluated with a working directory
# matching where the Nim command was invoked. This means that we
# must do all file existence checks with full absolute paths:
system.fileExists(currentDir & "nimbus-build-system.paths"):
echo "Using Nimbus Paths"
include "nimbus-build-system.paths"
elif fileExists("nimble.paths"):
echo "Using Nimble Paths"
# begin Nimble config (version 1)
include "nimble.paths"
# end Nimble config
# end Nimble config

env.sh Executable file (7 changed lines)

@ -0,0 +1,7 @@
#!/usr/bin/env bash
# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh
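
A hedged usage sketch, assuming the vendored nimbus-build-system `env.sh` behaves as in other Nimbus projects (running its arguments inside the prepared build environment, or loading that environment when sourced):

```sh
# Run a single command inside the build environment:
./env.sh nim --version
# Or load the environment into the current shell:
source ./env.sh
```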


@ -0,0 +1,30 @@
import
std/sugar,
libp2p/crypto/[crypto, secp]
from secp256k1 import ecdhRaw, SkEcdhRawSecret, toRaw
proc fromHex*(T: type PrivateKey, data: string): Result[PrivateKey, cstring] =
let skKey = ? SkPrivateKey.init(data).mapErr(e =>
("Failed to init private key from hex string: " & $e).cstring)
ok PrivateKey.init(skKey)
proc fromHex*(T: type PublicKey, data: string): Result[PublicKey, cstring] =
let skKey = ? SkPublicKey.init(data).mapErr(e =>
("Failed to init public key from hex string: " & $e).cstring)
ok PublicKey.init(skKey)
func ecdhRaw*(seckey: SkPrivateKey, pubkey: SkPublicKey): SkEcdhRawSecret {.borrow.}
proc ecdhRaw*(
priv: PrivateKey,
pub: PublicKey): Result[SkEcdhRawSecret, cstring] =
# TODO: Do we need to support non-secp256k1 schemes?
if priv.scheme != Secp256k1 or pub.scheme != Secp256k1:
return err "Must use secp256k1 scheme".cstring
ok ecdhRaw(priv.skkey, pub.skkey)
proc toRaw*(pubkey: PublicKey): seq[byte] =
secp256k1.SkPublicKey(pubkey.skkey).toRaw()[1..^1]


@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -11,21 +11,19 @@
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#sessions
##
{.push raises: [].}
{.push raises: [Defect].}
import
std/[hashes, net, options, sugar, tables],
stew/endians2,
bearssl/rand,
chronicles,
stew/[byteutils],
stew/[results, byteutils],
stint,
libp2p/crypto/crypto as libp2p_crypto,
libp2p/crypto/secp,
libp2p/signed_envelope,
metrics,
nimcrypto,
results,
"."/[messages, messages_encoding, node, spr, hkdf, sessions],
"."/crypto
@ -34,16 +32,13 @@ from stew/objects import checkedEnumAssign
export crypto
declareCounter dht_session_lru_cache_hits, "Session LRU cache hits"
declareCounter dht_session_lru_cache_misses, "Session LRU cache misses"
declareCounter dht_session_decrypt_failures, "Session decrypt failures"
declareCounter discovery_session_lru_cache_hits, "Session LRU cache hits"
declareCounter discovery_session_lru_cache_misses, "Session LRU cache misses"
declareCounter discovery_session_decrypt_failures, "Session decrypt failures"
logScope:
topics = "discv5"
type
cipher = aes128
const
version: uint16 = 1
idSignatureText = "discovery v5 identity proof"
@ -166,7 +161,7 @@ proc deriveKeys*(n1, n2: NodeId, priv: PrivateKey, pub: PublicKey,
ok secrets
proc encryptGCM*(key: AesKey, nonce, pt, authData: openArray[byte]): seq[byte] =
var ectx: GCM[cipher]
var ectx: GCM[aes128]
ectx.init(key, nonce, authData)
result = newSeq[byte](pt.len + gcmTagSize)
ectx.encrypt(pt, result)
@ -179,7 +174,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
debug "cipher is missing tag", len = ct.len
return
var dctx: GCM[cipher]
var dctx: GCM[aes128]
dctx.init(key, nonce, authData)
var res = newSeq[byte](ct.len - gcmTagSize)
var tag: array[gcmTagSize, byte]
@ -193,7 +188,7 @@ proc decryptGCM*(key: AesKey, nonce, ct, authData: openArray[byte]):
return some(res)
proc encryptHeader*(id: NodeId, iv, header: openArray[byte]): seq[byte] =
var ectx: CTR[cipher]
var ectx: CTR[aes128]
ectx.init(id.toByteArrayBE().toOpenArray(0, 15), iv)
result = newSeq[byte](header.len)
ectx.encrypt(header, result)
@ -205,7 +200,7 @@ proc hasHandshake*(c: Codec, key: HandshakeKey): bool =
proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
seq[byte] =
result.add(protocolId)
result.add(endians2.toBytesBE(version))
result.add(version.toBytesBE())
result.add(byte(flag))
result.add(nonce)
# TODO: assert on authSize of > 2^16?
@ -213,9 +208,8 @@ proc encodeStaticHeader*(flag: Flag, nonce: AESGCMNonce, authSize: int):
proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
toId: NodeId, toAddr: Address, message: openArray[byte]):
(seq[byte], AESGCMNonce, bool) =
(seq[byte], AESGCMNonce) =
var nonce: AESGCMNonce
var haskey: bool
hmacDrbgGenerate(rng, nonce) # Random AESGCM nonce
var iv: array[ivSize, byte]
hmacDrbgGenerate(rng, iv) # Random IV
@ -231,11 +225,10 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
# message
var messageEncrypted: seq[byte]
var initiatorKey, recipientKey1, recipientKey2: AesKey
if c.sessions.load(toId, toAddr, recipientKey1, recipientKey2, initiatorKey):
haskey = true
var initiatorKey, recipientKey: AesKey
if c.sessions.load(toId, toAddr, recipientKey, initiatorKey):
messageEncrypted = encryptGCM(initiatorKey, nonce, message, @iv & header)
dht_session_lru_cache_hits.inc()
discovery_session_lru_cache_hits.inc()
else:
# We might not have the node's keys if the handshake hasn't been performed
# yet. That's fine, we send a random-packet and we will be responded with
@ -244,11 +237,10 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
# message. 16 bytes for the gcm tag and 4 bytes for ping with requestId of
# 1 byte (e.g "01c20101"). Could increase to 27 for 8 bytes requestId in
# case this must not look like a random packet.
haskey = false
var randomData: array[gcmTagSize + 4, byte]
hmacDrbgGenerate(rng, randomData)
messageEncrypted.add(randomData)
dht_session_lru_cache_misses.inc()
discovery_session_lru_cache_misses.inc()
let maskedHeader = encryptHeader(toId, iv, header)
@ -257,7 +249,7 @@ proc encodeMessagePacket*(rng: var HmacDrbgContext, c: var Codec,
packet.add(maskedHeader)
packet.add(messageEncrypted)
return (packet, nonce, haskey)
return (packet, nonce)
proc encodeWhoareyouPacket*(rng: var HmacDrbgContext, c: var Codec,
toId: NodeId, toAddr: Address, requestNonce: AESGCMNonce, recordSeq: uint64,
@ -315,7 +307,7 @@ proc encodeHandshakePacket*(rng: var HmacDrbgContext, c: var Codec,
authdataHead.add(c.localNode.id.toByteArrayBE())
let ephKeys = ? KeyPair.random(PKScheme.Secp256k1, rng)
let ephKeys = ? KeyPair.random(rng)
.mapErr((e: CryptoError) =>
("Failed to create random key pair: " & $e).cstring)
@ -378,7 +370,7 @@ proc decodeHeader*(id: NodeId, iv, maskedHeader: openArray[byte]):
DecodeResult[(StaticHeader, seq[byte])] =
# No need to check staticHeader size as that is included in minimum packet
# size check in decodePacket
var ectx: CTR[cipher]
var ectx: CTR[aes128]
ectx.init(id.toByteArrayBE().toOpenArray(0, aesKeySize - 1), iv)
# Decrypt static-header part of the header
var staticHeader = newSeq[byte](staticHeaderSize)
@ -427,35 +419,26 @@ proc decodeMessagePacket(c: var Codec, fromAddr: Address, nonce: AESGCMNonce,
let srcId = NodeId.fromBytesBE(header.toOpenArray(staticHeaderSize,
header.high))
var initiatorKey, recipientKey1, recipientKey2: AesKey
if not c.sessions.load(srcId, fromAddr, recipientKey1, recipientKey2, initiatorKey):
var initiatorKey, recipientKey: AesKey
if not c.sessions.load(srcId, fromAddr, recipientKey, initiatorKey):
# Don't consider this an error, simply haven't done a handshake yet or
# the session got removed.
trace "Decrypting failed (no keys)"
dht_session_lru_cache_misses.inc()
discovery_session_lru_cache_misses.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
dht_session_lru_cache_hits.inc()
discovery_session_lru_cache_hits.inc()
var pt = decryptGCM(recipientKey2, nonce, ct, @iv & @header)
let pt = decryptGCM(recipientKey, nonce, ct, @iv & @header)
if pt.isNone():
trace "Decrypting failed, trying other key"
pt = decryptGCM(recipientKey1, nonce, ct, @iv & @header)
if pt.isNone():
# Don't consider this an error, the session probably got removed at the
# peer's side and a random message is sent.
# This might also be a cross-connect. Not deleting key, as it might be
# needed later, depending on message order.
trace "Decrypting failed (invalid keys)", address = fromAddr
#c.sessions.del(srcId, fromAddr)
dht_session_decrypt_failures.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
# Most probably the same decryption key will work next time. We should
# elevate its priority.
c.sessions.swapr(srcId, fromAddr)
# Don't consider this an error, the session probably got removed at the
# peer's side and a random message is sent.
trace "Decrypting failed (invalid keys)"
c.sessions.del(srcId, fromAddr)
discovery_session_decrypt_failures.inc()
return ok(Packet(flag: Flag.OrdinaryMessage, requestNonce: nonce,
srcId: srcId))
let message = ? decodeMessage(pt.get())


@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -15,7 +15,7 @@
## To select the right address, a majority count is done. This is done over a
## sort of moving window as votes expire after `IpVoteTimeout`.
{.push raises: [].}
{.push raises: [Defect].}
import
std/[tables, options],


@ -1,6 +1,6 @@
import std/[tables, lists, options]
{.push raises: [].}
{.push raises: [Defect].}
export tables, lists, options
@ -55,10 +55,3 @@ iterator items*[K, V](lru: LRUCache[K, V]): V =
for item in lru.list:
yield item[1]
iterator keys*[K, V](lru: LRUCache[K, V]): K =
## Get cached keys - this doesn't touch the cache
##
for item in lru.table.keys:
yield item


@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -10,7 +10,7 @@
## These messages get protobuf encoded, while in the spec they get RLP encoded.
##
{.push raises: [].}
{.push raises: [Defect].}
import
std/[hashes, net],


@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2020-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -11,10 +11,8 @@
import
std/net,
chronicles,
stew/endians2,
libp2p/routing_record,
libp2p/signed_envelope,
libp2p/protobuf/minprotobuf,
"."/[messages, spr, node],
../../../../dht/providers_encoding
@ -100,7 +98,7 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
family = endians2.fromBytesBE(uint8, buffer).IpAddressFamily
family = uint8.fromBytesBE(buffer).IpAddressFamily
ok(true)
proc write*(pb: var ProtoBuffer, field: int, family: IpAddressFamily) =
@ -326,7 +324,7 @@ proc encodeMessage*[T: SomeMessage](p: T, reqId: RequestId): seq[byte] =
pb.write(2, encoded)
pb.finish()
result.add(pb.buffer)
trace "Encoded protobuf message", typ = $T
trace "Encoded protobuf message", typ = $T, encoded
proc decodeMessage*(body: openArray[byte]): DecodeResult[Message] =
## Decodes to the specific `Message` type.


@ -1,51 +1,40 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.push raises: [Defect].}
import
std/[hashes, net],
std/hashes,
bearssl/rand,
chronicles,
chronos,
nimcrypto,
stew/shims/net,
stint,
./crypto,
./spr
export stint
const
avgSmoothingFactor = 0.9
seenSmoothingFactor = 0.9
type
NodeId* = UInt256
Address* = object
ip*: IpAddress
ip*: ValidIpAddress
port*: Port
Stats* = object
rttMin*: float #millisec
rttAvg*: float #millisec
bwAvg*: float #bps
bwMax*: float #bps
Node* = ref object
id*: NodeId
pubkey*: PublicKey
address*: Option[Address]
record*: SignedPeerRecord
seen*: float ## Indicates if there was at least one successful
seen*: bool ## Indicates if there was at least one successful
## request-response with this node, or if the node was verified
## through the underlying transport mechanisms. After first contact
## it tracks how reliable the communication with the node is.
stats*: Stats # traffic measurements and statistics
## through the underlying transport mechanisms.
func toNodeId*(pid: PeerId): NodeId =
## Convert public key to a node identifier.
@ -68,7 +57,7 @@ func newNode*(
id: ? pk.toNodeId(),
pubkey: pk,
record: record,
address: Address(ip: ip, port: port).some)
address: Address(ip: ValidIpAddress.init(ip), port: port).some)
ok node
@ -88,9 +77,7 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
nodeId = ? pk.get().toNodeId()
if r.ip.isSome() and r.udp.isSome():
let a = Address(
ip: IpAddress(family: IPv4, address_v4: r.ip.get()), port: Port(r.udp.get())
)
let a = Address(ip: ipv4(r.ip.get()), port: Port(r.udp.get()))
ok(Node(
id: nodeId,
@ -104,7 +91,7 @@ func newNode*(r: SignedPeerRecord): Result[Node, cstring] =
record: r,
address: none(Address)))
proc update*(n: Node, pk: PrivateKey, ip: Option[IpAddress],
proc update*(n: Node, pk: PrivateKey, ip: Option[ValidIpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()): Result[void, cstring] =
? n.record.update(pk, ip, tcpPort, udpPort)
@ -148,14 +135,14 @@ func shortLog*(id: NodeId): string =
result = sid
else:
result = newStringOfCap(10)
for i in 0..<3:
for i in 0..<2:
result.add(sid[i])
result.add("*")
for i in (len(sid) - 6)..sid.high:
result.add(sid[i])
chronicles.formatIt(NodeId): shortLog(it)
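For reference, a minimal standalone sketch of the truncation above, using a plain string in place of the NodeId's hex form (the master side keeps the first three characters, v0.2.1 keeps two):

# Standalone sketch of the truncated id formatting; `sid` stands in for the
# hex form of a NodeId and is purely illustrative.
let sid = "1a2b3c4d5e6f70819293a4b5c6d7e8f9"
var short = newStringOfCap(10)
for i in 0 ..< 3:
  short.add(sid[i])
short.add("*")
for i in (len(sid) - 6) .. sid.high:
  short.add(sid[i])
doAssert short == "1a2*d7e8f9"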
func hash*(ip: IpAddress): Hash =
func hash*(ip: ValidIpAddress): Hash =
case ip.family
of IpAddressFamily.IPv6: hash(ip.address_v6)
of IpAddressFamily.IPv4: hash(ip.address_v4)
@ -190,38 +177,3 @@ func shortLog*(nodes: seq[Node]): string =
result.add("]")
chronicles.formatIt(seq[Node]): shortLog(it)
func shortLog*(address: Address): string =
$address
chronicles.formatIt(Address): shortLog(it)
func registerSeen*(n:Node, seen = true) =
## Register an event of seeing (getting a message from) or not seeing (missing a message from) the node
## Note: interpretation might depend on NAT type
if n.seen == 0: # first time seeing the node
n.seen = 1
else:
n.seen = seenSmoothingFactor * n.seen + (1.0 - seenSmoothingFactor) * seen.float
func alreadySeen*(n:Node) : bool =
## Was the node seen at least once?
n.seen > 0
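A minimal standalone sketch of the exponential smoothing that registerSeen applies, with a bare float standing in for the Node object (the 0.9 factor mirrors seenSmoothingFactor above):

const smoothing = 0.9   # mirrors seenSmoothingFactor

func updateSeen(seen: var float, observed: bool) =
  if seen == 0:          # first contact seeds the score
    seen = 1
  else:                  # exponentially weighted moving average afterwards
    seen = smoothing * seen + (1.0 - smoothing) * observed.float

var score = 0.0
updateSeen(score, true)   # first contact      -> 1.0
updateSeen(score, false)  # one missed reply   -> 0.9
updateSeen(score, false)  # another missed one -> 0.81
doAssert abs(score - 0.81) < 1e-9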
# collecting performance metrics
func registerRtt*(n: Node, rtt: Duration) =
## register an RTT measurement
let rttMs = rtt.nanoseconds.float / 1e6
n.stats.rttMin =
if n.stats.rttMin == 0: rttMs
else: min(n.stats.rttMin, rttMs)
n.stats.rttAvg =
if n.stats.rttAvg == 0: rttMs
else: avgSmoothingFactor * n.stats.rttAvg + (1.0 - avgSmoothingFactor) * rttMs
func registerBw*(n: Node, bw: float) =
## register a bandwidth measurement
n.stats.bwMax = max(n.stats.bwMax, bw)
n.stats.bwAvg =
if n.stats.bwAvg == 0: bw
else: avgSmoothingFactor * n.stats.bwAvg + (1.0 - avgSmoothingFactor) * bw

View File

@ -1,8 +1,8 @@
{.push raises: [].}
{.push raises: [Defect].}
import
std/[net, sets, options],
results, chronicles, chronos,
std/[sets, options],
stew/results, stew/shims/net, chronicles, chronos,
"."/[node, spr, routing_table]
logScope:

View File

@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -71,18 +71,18 @@
## more requests will be needed for a lookup (adding bandwidth and latency).
## This might be a concern for mobile devices.
{.push raises: [].}
{.push raises: [Defect].}
import
std/[net, tables, sets, options, math, sequtils, algorithm, strutils],
std/[tables, sets, options, math, sequtils, algorithm, strutils],
stew/shims/net as stewNet,
json_serialization/std/net,
stew/[base64, endians2],
stew/[base64, endians2, results],
pkg/[chronicles, chronicles/chronos_tools],
pkg/chronos,
pkg/stint,
pkg/bearssl/rand,
pkg/metrics,
pkg/results
pkg/metrics
import "."/[
messages,
@ -100,13 +100,13 @@ import nimcrypto except toHex
export options, results, node, spr, providers
declareCounter dht_message_requests_outgoing,
declareCounter discovery_message_requests_outgoing,
"Discovery protocol outgoing message requests", labels = ["response"]
declareCounter dht_message_requests_incoming,
declareCounter discovery_message_requests_incoming,
"Discovery protocol incoming message requests", labels = ["response"]
declareCounter dht_unsolicited_messages,
declareCounter discovery_unsolicited_messages,
"Discovery protocol unsolicited or timed-out messages"
declareCounter dht_enr_auto_update,
declareCounter discovery_enr_auto_update,
"Amount of discovery IP:port address SPR auto updates"
logScope:
@ -117,7 +117,6 @@ const
LookupRequestLimit = 3 ## Amount of distances requested in a single Findnode
## message for a lookup or query
FindNodeResultLimit = 16 ## Maximum amount of SPRs in the total Nodes messages
FindNodeFastResultLimit = 6 ## Maximum amount of SPRs in response to findNodeFast
## that will be processed
MaxNodesPerMessage = 3 ## Maximum amount of SPRs per individual Nodes message
RefreshInterval = 5.minutes ## Interval of launching a random query to
@ -126,17 +125,12 @@ const
RevalidateMax = 10000 ## Revalidation of a peer is done between min and max milliseconds.
## value in milliseconds
IpMajorityInterval = 5.minutes ## Interval for checking the latest IP:Port
DebugPrintInterval = 5.minutes ## Interval to print neighborhood with stats
## majority and updating this when SPR auto update is set.
InitialLookups = 1 ## Amount of lookups done when populating the routing table
ResponseTimeout* = 1.seconds ## timeout for the response of a request-response
ResponseTimeout* = 4.seconds ## timeout for the response of a request-response
MaxProvidersEntries* = 1_000_000 # one million records
MaxProvidersPerEntry* = 20 # providers per entry
## call
FindnodeSeenThreshold = 1.0 ## threshold used as findnode response filter
LookupSeenThreshold = 0.0 ## threshold used for lookup nodeset selection
QuerySeenThreshold = 0.0 ## threshold used for query nodeset selection
NoreplyRemoveThreshold = 0.5 ## remove node on no reply if 'seen' is below this value
func shortLog*(record: SignedPeerRecord): string =
## Returns compact string representation of ``SignedPeerRecord``.
@ -172,7 +166,6 @@ type
refreshLoop: Future[void]
revalidateLoop: Future[void]
ipMajorityLoop: Future[void]
debugPrintLoop: Future[void]
lastLookup: chronos.Moment
bootstrapRecords*: seq[SignedPeerRecord]
ipVote: IpVote
@ -189,9 +182,6 @@ type
DiscResult*[T] = Result[T, cstring]
func `$`*(p: Protocol): string =
$p.localNode.id
const
defaultDiscoveryConfig* = DiscoveryConfig(
tableIpLimits: DefaultTableIpLimits,
@ -241,7 +231,7 @@ proc randomNodes*(d: Protocol, maxAmount: int): seq[Node] =
d.routingTable.randomNodes(maxAmount)
proc randomNodes*(d: Protocol, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].}): seq[Node] =
pred: proc(x: Node): bool {.gcsafe, noSideEffect.}): seq[Node] =
## Get a `maxAmount` of random nodes from the local routing table with the
## `pred` predicate function applied as filter on the nodes selected.
d.routingTable.randomNodes(maxAmount, pred)
@ -253,14 +243,14 @@ proc randomNodes*(d: Protocol, maxAmount: int,
d.randomNodes(maxAmount, proc(x: Node): bool = x.record.contains(enrField))
proc neighbours*(d: Protocol, id: NodeId, k: int = BUCKET_SIZE,
seenThreshold = 0.0): seq[Node] =
seenOnly = false): seq[Node] =
## Return up to k neighbours (closest node ids) of the given node id.
d.routingTable.neighbours(id, k, seenThreshold)
d.routingTable.neighbours(id, k, seenOnly)
proc neighboursAtDistances*(d: Protocol, distances: seq[uint16],
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
## Return up to k neighbours (closest node ids) at given distances.
d.routingTable.neighboursAtDistances(distances, k, seenThreshold)
d.routingTable.neighboursAtDistances(distances, k, seenOnly)
proc nodesDiscovered*(d: Protocol): int = d.routingTable.len
@ -282,7 +272,7 @@ proc updateRecord*(
newSpr = spr.get()
seqNo = d.localNode.record.seqNum
info "Updated discovery SPR", uri = newSpr.toURI(), newSpr = newSpr.data
info "Updated discovery SPR", uri = newSpr.toURI()
d.localNode.record = newSpr
d.localNode.record.data.seqNo = seqNo
@ -348,7 +338,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
# TODO: Still deduplicate also?
if fn.distances.all(proc (x: uint16): bool = return x <= 256):
d.sendNodes(fromId, fromAddr, reqId,
d.routingTable.neighboursAtDistances(fn.distances, FindNodeResultLimit, FindnodeSeenThreshold))
d.routingTable.neighboursAtDistances(fn.distances, seenOnly = true))
else:
# At least one invalid distance; being the polite node we are, we still respond
# with empty nodes.
@ -357,7 +347,7 @@ proc handleFindNode(d: Protocol, fromId: NodeId, fromAddr: Address,
proc handleFindNodeFast(d: Protocol, fromId: NodeId, fromAddr: Address,
fnf: FindNodeFastMessage, reqId: RequestId) =
d.sendNodes(fromId, fromAddr, reqId,
d.routingTable.neighbours(fnf.target, FindNodeFastResultLimit, FindnodeSeenThreshold))
d.routingTable.neighbours(fnf.target, seenOnly = true))
# TODO: if known, maybe we should add exact target even if not yet "seen"
proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
@ -379,7 +369,7 @@ proc handleTalkReq(d: Protocol, fromId: NodeId, fromAddr: Address,
proc addProviderLocal(p: Protocol, cId: NodeId, prov: SignedPeerRecord) {.async.} =
trace "adding provider to local db", n = p.localNode, cId, prov
if (let res = (await p.providers.add(cId, prov)); res.isErr):
if (let res = (await p.providers.add(cid, prov)); res.isErr):
trace "Unable to add provider", cid, peerId = prov.data.peerId
proc handleAddProvider(
@ -413,27 +403,27 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
message: Message) =
case message.kind
of ping:
dht_message_requests_incoming.inc()
discovery_message_requests_incoming.inc()
d.handlePing(srcId, fromAddr, message.ping, message.reqId)
of findNode:
dht_message_requests_incoming.inc()
discovery_message_requests_incoming.inc()
d.handleFindNode(srcId, fromAddr, message.findNode, message.reqId)
of findNodeFast:
dht_message_requests_incoming.inc()
discovery_message_requests_incoming.inc()
d.handleFindNodeFast(srcId, fromAddr, message.findNodeFast, message.reqId)
of talkReq:
dht_message_requests_incoming.inc()
discovery_message_requests_incoming.inc()
d.handleTalkReq(srcId, fromAddr, message.talkReq, message.reqId)
of addProvider:
dht_message_requests_incoming.inc()
dht_message_requests_incoming.inc(labelValues = ["no_response"])
discovery_message_requests_incoming.inc()
discovery_message_requests_incoming.inc(labelValues = ["no_response"])
d.handleAddProvider(srcId, fromAddr, message.addProvider, message.reqId)
of getProviders:
dht_message_requests_incoming.inc()
discovery_message_requests_incoming.inc()
asyncSpawn d.handleGetProviders(srcId, fromAddr, message.getProviders, message.reqId)
of regTopic, topicQuery:
dht_message_requests_incoming.inc()
dht_message_requests_incoming.inc(labelValues = ["no_response"])
discovery_message_requests_incoming.inc()
discovery_message_requests_incoming.inc(labelValues = ["no_response"])
trace "Received unimplemented message kind", kind = message.kind,
origin = fromAddr
else:
@ -441,7 +431,7 @@ proc handleMessage(d: Protocol, srcId: NodeId, fromAddr: Address,
if d.awaitedMessages.take((srcId, message.reqId), waiter):
waiter.complete(some(message))
else:
dht_unsolicited_messages.inc()
discovery_unsolicited_messages.inc()
trace "Timed out or unrequested message", kind = message.kind,
origin = fromAddr
@ -453,50 +443,27 @@ proc registerTalkProtocol*(d: Protocol, protocolId: seq[byte],
else:
ok()
proc replaceNode(d: Protocol, n: Node, forceRemoveBelow = 1.0) =
proc replaceNode(d: Protocol, n: Node) =
if n.record notin d.bootstrapRecords:
d.routingTable.replaceNode(n, forceRemoveBelow)
d.routingTable.replaceNode(n)
else:
# For now we never remove bootstrap nodes. It might make sense to actually
# do so and to retry them only in case we drop to a really low amount of
# peers in the routing table.
debug "Message request to bootstrap node failed", src=d.localNode, dst=n
proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T,
reqId: RequestId) =
doAssert(toNode.address.isSome())
let
message = encodeMessage(m, reqId)
trace "Send message packet", dstId = toNode.id,
address = toNode.address, kind = messageKind(T)
dht_message_requests_outgoing.inc()
d.transport.sendMessage(toNode, message)
proc waitResponse*[T: SomeMessage](d: Protocol, node: Node, msg: T):
Future[Option[Message]] =
let reqId = RequestId.init(d.rng[])
result = d.waitMessage(node, reqId)
sendRequest(d, node, msg, reqId)
proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId, timeout = ResponseTimeout):
proc waitMessage(d: Protocol, fromNode: Node, reqId: RequestId):
Future[Option[Message]] =
result = newFuture[Option[Message]]("waitMessage")
let res = result
let key = (fromNode.id, reqId)
sleepAsync(timeout).addCallback() do(data: pointer):
sleepAsync(ResponseTimeout).addCallback() do(data: pointer):
d.awaitedMessages.del(key)
if not res.finished:
res.complete(none(Message))
d.awaitedMessages[key] = result
proc waitNodeResponses*[T: SomeMessage](d: Protocol, node: Node, msg: T):
Future[DiscResult[seq[SignedPeerRecord]]] =
let reqId = RequestId.init(d.rng[])
result = d.waitNodes(node, reqId)
sendRequest(d, node, msg, reqId)
proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
Future[DiscResult[seq[SignedPeerRecord]]] {.async.} =
## Wait for one or more nodes replies.
@ -505,70 +472,72 @@ proc waitNodes(d: Protocol, fromNode: Node, reqId: RequestId):
## on that, more replies will be awaited.
## If one reply is lost here (timed out), others are ignored too.
## The same applies to out-of-order receipt.
let startTime = Moment.now()
var op = await d.waitMessage(fromNode, reqId)
if op.isSome:
if op.get.kind == MessageKind.nodes:
var res = op.get.nodes.sprs
let
total = op.get.nodes.total
firstTime = Moment.now()
rtt = firstTime - startTime
# trace "nodes RTT:", rtt, node = fromNode
fromNode.registerRtt(rtt)
let total = op.get.nodes.total
for i in 1 ..< total:
op = await d.waitMessage(fromNode, reqId)
if op.isSome and op.get.kind == MessageKind.nodes:
res.add(op.get.nodes.sprs)
# Estimate bandwidth based on the received UDP packet train, assuming the packets
# were released quickly and spaced in time by the bandwidth bottleneck. This is
# just a rough packet-pair estimate, far from perfect.
# TODO: get message size from lower layer for better bandwidth estimate
# TODO: get better reception timestamp from lower layers
let
deltaT = Moment.now() - firstTime
bwBps = 500.0 * 8.0 / (deltaT.nanoseconds.float / i.float / 1e9)
# trace "bw estimate:", deltaT = deltaT, i, bw_mbps = bwBps / 1e6, node = fromNode
fromNode.registerBw(bwBps)
else:
# No error on this as we received some nodes.
break
return ok(res)
else:
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to find node message")
else:
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Nodes message not received in time")
proc sendRequest*[T: SomeMessage](d: Protocol, toId: NodeId, toAddr: Address, m: T):
RequestId =
let
reqId = RequestId.init(d.rng[])
message = encodeMessage(m, reqId)
trace "Send message packet", dstId = toId, toAddr, kind = messageKind(T)
discovery_message_requests_outgoing.inc()
d.transport.sendMessage(toId, toAddr, message)
return reqId
proc sendRequest*[T: SomeMessage](d: Protocol, toNode: Node, m: T):
RequestId =
doAssert(toNode.address.isSome())
let
reqId = RequestId.init(d.rng[])
message = encodeMessage(m, reqId)
trace "Send message packet", dstId = toNode.id,
address = toNode.address, kind = messageKind(T)
discovery_message_requests_outgoing.inc()
d.transport.sendMessage(toNode, message)
return reqId
proc ping*(d: Protocol, toNode: Node):
Future[DiscResult[PongMessage]] {.async.} =
## Send a discovery ping message.
##
## Returns the received pong message or an error.
let
msg = PingMessage(sprSeq: d.localNode.record.seqNum)
startTime = Moment.now()
resp = await d.waitResponse(toNode, msg)
rtt = Moment.now() - startTime
# trace "ping RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
let reqId = d.sendRequest(toNode,
PingMessage(sprSeq: d.localNode.record.seqNum))
let resp = await d.waitMessage(toNode, reqId)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == pong:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().pong)
else:
d.replaceNode(toNode)
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to ping message")
else:
# A ping (or the pong) was lost, what should we do? Previous implementation called
# d.replaceNode(toNode) immediately, which removed the node. This is too aggressive,
# especially if we have a temporary network outage. Although bootstrap nodes are protected
# from being removed, everything else would slowly be removed.
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Pong message not received in time")
proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
@ -577,13 +546,12 @@ proc findNode*(d: Protocol, toNode: Node, distances: seq[uint16]):
##
## Returns the received nodes or an error.
## Received SPRs are already validated and converted to `Node`.
let
msg = FindNodeMessage(distances: distances)
nodes = await d.waitNodeResponses(toNode, msg)
let reqId = d.sendRequest(toNode, FindNodeMessage(distances: distances))
let nodes = await d.waitNodes(toNode, reqId)
d.routingTable.setJustSeen(toNode, nodes.isOk)
if nodes.isOk:
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit, distances)
d.routingTable.setJustSeen(toNode)
return ok(res)
else:
trace "findNode nodes not OK."
@ -596,13 +564,12 @@ proc findNodeFast*(d: Protocol, toNode: Node, target: NodeId):
##
## Returns the received nodes or an error.
## Received SPRs are already validated and converted to `Node`.
let
msg = FindNodeFastMessage(target: target)
nodes = await d.waitNodeResponses(toNode, msg)
let reqId = d.sendRequest(toNode, FindNodeFastMessage(target: target))
let nodes = await d.waitNodes(toNode, reqId)
d.routingTable.setJustSeen(toNode, nodes.isOk)
if nodes.isOk:
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeFastResultLimit)
let res = verifyNodesRecords(nodes.get(), toNode, FindNodeResultLimit)
d.routingTable.setJustSeen(toNode)
return ok(res)
else:
d.replaceNode(toNode)
@ -614,26 +581,21 @@ proc talkReq*(d: Protocol, toNode: Node, protocol, request: seq[byte]):
## Send a discovery talkreq message.
##
## Returns the received talkresp message or an error.
let
msg = TalkReqMessage(protocol: protocol, request: request)
startTime = Moment.now()
resp = await d.waitResponse(toNode, msg)
rtt = Moment.now() - startTime
# trace "talk RTT:", rtt, node = toNode
toNode.registerRtt(rtt)
let reqId = d.sendRequest(toNode,
TalkReqMessage(protocol: protocol, request: request))
let resp = await d.waitMessage(toNode, reqId)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == talkResp:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().talkResp.response)
else:
d.replaceNode(toNode)
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to talk request message")
else:
# remove on loss only if there is a replacement
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("Talk response message not received in time")
proc lookupDistances*(target, dest: NodeId): seq[uint16] =
@ -648,18 +610,25 @@ proc lookupDistances*(target, dest: NodeId): seq[uint16] =
result.add(td - uint16(i))
inc i
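Only a sliver of lookupDistances is visible in this hunk. A hedged standalone sketch of the usual approach (start at the log-distance between target and destination, then fan out to neighbouring distances until LookupRequestLimit entries are collected); the bounds checks are assumptions based on the fragment above and the upstream discv5 code, not a verbatim copy:

const LookupRequestLimit = 3   # matches the constant defined earlier in this file

func lookupDistancesFrom(td: uint16): seq[uint16] =
  result.add(td)
  var i = 1
  while result.len < LookupRequestLimit:
    if int(td) + i <= 256:
      result.add(td + uint16(i))
    if int(td) - i > 0:
      result.add(td - uint16(i))
    inc i

doAssert lookupDistancesFrom(250) == @[250'u16, 251, 249]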
proc lookupWorker(d: Protocol, destNode: Node, target: NodeId, fast: bool):
proc lookupWorker(d: Protocol, destNode: Node, target: NodeId):
Future[seq[Node]] {.async.} =
let dists = lookupDistances(target, destNode.id)
let r =
if fast:
await d.findNodeFast(destNode, target)
else:
# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let dists = lookupDistances(target, destNode.id)
await d.findNode(destNode, dists)
# Instead of doing max `LookupRequestLimit` findNode requests, make use
# of the discv5.1 functionality to request nodes for multiple distances.
let r = await d.findNode(destNode, dists)
if r.isOk:
result.add(r[])
# Attempt to add all nodes discovered
for n in result:
discard d.addNode(n)
proc lookupWorkerFast(d: Protocol, destNode: Node, target: NodeId):
Future[seq[Node]] {.async.} =
## use target NodeId based find_node
let r = await d.findNodeFast(destNode, target)
if r.isOk:
result.add(r[])
@ -673,7 +642,7 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
# `closestNodes` holds the k closest nodes to target found, sorted by distance
# Unvalidated nodes are used for requests as a form of validation.
var closestNodes = d.routingTable.neighbours(target, BUCKET_SIZE,
LookupSeenThreshold)
seenOnly = false)
var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@ -690,7 +659,10 @@ proc lookup*(d: Protocol, target: NodeId, fast: bool = false): Future[seq[Node]]
while i < closestNodes.len and pendingQueries.len < Alpha:
let n = closestNodes[i]
if not asked.containsOrIncl(n.id):
pendingQueries.add(d.lookupWorker(n, target, fast))
if fast:
pendingQueries.add(d.lookupWorkerFast(n, target))
else:
pendingQueries.add(d.lookupWorker(n, target))
inc i
trace "discv5 pending queries", total = pendingQueries.len
@ -735,8 +707,7 @@ proc addProvider*(
res.add(d.localNode)
for toNode in res:
if toNode != d.localNode:
let reqId = RequestId.init(d.rng[])
d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr), reqId)
discard d.sendRequest(toNode, AddProviderMessage(cId: cId, prov: pr))
else:
asyncSpawn d.addProviderLocal(cId, pr)
@ -749,21 +720,22 @@ proc sendGetProviders(d: Protocol, toNode: Node,
trace "sendGetProviders", toNode, msg
let
resp = await d.waitResponse(toNode, msg)
reqId = d.sendRequest(toNode, msg)
resp = await d.waitMessage(toNode, reqId)
d.routingTable.setJustSeen(toNode, resp.isSome())
if resp.isSome():
if resp.get().kind == MessageKind.providers:
d.routingTable.setJustSeen(toNode)
return ok(resp.get().provs)
else:
# TODO: do we need to do something when there is an invalid response?
d.replaceNode(toNode)
dht_message_requests_outgoing.inc(labelValues = ["invalid_response"])
discovery_message_requests_outgoing.inc(labelValues = ["invalid_response"])
return err("Invalid response to GetProviders message")
else:
# remove on loss only if there is a replacement
d.replaceNode(toNode, NoreplyRemoveThreshold)
dht_message_requests_outgoing.inc(labelValues = ["no_response"])
# TODO: do we need to do something when there is no response?
d.replaceNode(toNode)
discovery_message_requests_outgoing.inc(labelValues = ["no_response"])
return err("GetProviders response message not received in time")
proc getProvidersLocal*(
@ -836,7 +808,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
## This will take k nodes from the routing table closest to target and
## query them for nodes closest to target. If there are fewer than k nodes in
## the routing table, nodes returned by the first queries will be used.
var queryBuffer = d.routingTable.neighbours(target, k, QuerySeenThreshold)
var queryBuffer = d.routingTable.neighbours(target, k, seenOnly = false)
var asked, seen = initHashSet[NodeId]()
asked.incl(d.localNode.id) # No need to ask our own node
@ -851,7 +823,7 @@ proc query*(d: Protocol, target: NodeId, k = BUCKET_SIZE): Future[seq[Node]]
while i < min(queryBuffer.len, k) and pendingQueries.len < Alpha:
let n = queryBuffer[i]
if not asked.containsOrIncl(n.id):
pendingQueries.add(d.lookupWorker(n, target, false))
pendingQueries.add(d.lookupWorker(n, target))
inc i
trace "discv5 pending queries", total = pendingQueries.len
@ -962,8 +934,7 @@ proc revalidateNode*(d: Protocol, n: Node) {.async.} =
discard d.addNode(nodes[][0])
# Get IP and port from pong message and add it to the ip votes
trace "pong rx", n, myip = res.ip, myport = res.port
let a = Address(ip: res.ip, port: Port(res.port))
let a = Address(ip: ValidIpAddress.init(res.ip), port: Port(res.port))
d.ipVote.insert(n.id, a)
proc revalidateLoop(d: Protocol) {.async.} =
@ -1033,7 +1004,7 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
warn "Failed updating SPR with newly discovered external address",
majority, previous, error = res.error
else:
dht_enr_auto_update.inc()
discovery_enr_auto_update.inc()
info "Updated SPR with newly discovered external address",
majority, previous, uri = toURI(d.localNode.record)
else:
@ -1048,19 +1019,6 @@ proc ipMajorityLoop(d: Protocol) {.async.} =
trace "ipMajorityLoop canceled"
trace "ipMajorityLoop exited!"
proc debugPrintLoop(d: Protocol) {.async.} =
## Loop which prints the neighborhood with stats
while true:
await sleepAsync(DebugPrintInterval)
for b in d.routingTable.buckets:
debug "bucket", depth = b.getDepth,
len = b.nodes.len, standby = b.replacementLen
for n in b.nodes:
debug "node", n, rttMin = n.stats.rttMin.int, rttAvg = n.stats.rttAvg.int,
reliability = n.seen.round(3)
# bandwidth estimates are based on limited information, so not logging it yet to avoid confusion
# trace "node", n, bwMaxMbps = (n.stats.bwMax / 1e6).round(3), bwAvgMbps = (n.stats.bwAvg / 1e6).round(3)
func init*(
T: type DiscoveryConfig,
tableIpLimit: uint,
@ -1076,7 +1034,7 @@ func init*(
proc newProtocol*(
privKey: PrivateKey,
enrIp: Option[IpAddress],
enrIp: Option[ValidIpAddress],
enrTcpPort, enrUdpPort: Option[Port],
localEnrFields: openArray[(string, seq[byte])] = [],
bootstrapRecords: openArray[SignedPeerRecord] = [],
@ -1198,7 +1156,6 @@ proc start*(d: Protocol) {.async.} =
d.refreshLoop = refreshLoop(d)
d.revalidateLoop = revalidateLoop(d)
d.ipMajorityLoop = ipMajorityLoop(d)
d.debugPrintLoop = debugPrintLoop(d)
await d.providers.start()

View File

@ -1,17 +1,16 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.push raises: [Defect].}
import std/sequtils
import pkg/chronicles
import pkg/libp2p
import pkg/questionable
import ../node
import ../lru
@ -36,21 +35,22 @@ type
func add*(
self: var ProvidersCache,
id: NodeId,
record: SignedPeerRecord) =
## Add a provider for an id
## to the cache
provider: SignedPeerRecord) =
if self.disable:
return
without var providers =? self.cache.get(id):
providers = Providers.init(self.maxProviders.int)
var providers =
if id notin self.cache:
Providers.init(self.maxProviders.int)
else:
self.cache.get(id).get()
let
peerId = record.data.peerId
peerId = provider.data.peerId
trace "Adding provider record to cache", id, peerId
providers.put(peerId, record)
trace "Adding provider to cache", id, peerId
providers.put(peerId, provider)
self.cache.put(id, providers)
proc get*(
@ -58,55 +58,39 @@ proc get*(
id: NodeId,
start = 0,
stop = MaxProvidersPerEntry.int): seq[SignedPeerRecord] =
## Get providers for an id
## from the cache
if self.disable:
return
if recs =? self.cache.get(id):
if id in self.cache:
let
recs = self.cache.get(id).get
let
providers = toSeq(recs)[start..<min(recs.len, stop)]
trace "Providers already cached", id, len = providers.len
return providers
func remove*(
self: var ProvidersCache,
peerId: PeerId) =
## Remove a provider's records from every id
## in the cache
##
if self.disable:
return
for id in self.cache.keys:
if var providers =? self.cache.get(id):
trace "Removing provider from cache", id, peerId
providers.del(peerId)
self.cache.put(id, providers)
func remove*(
self: var ProvidersCache,
id: NodeId,
peerId: PeerId) =
## Remove a provider record for an id
## from the cache
##
if self.disable:
return
if var providers =? self.cache.get(id):
trace "Removing record from cache", id
providers.del(peerId)
self.cache.put(id, providers)
if id notin self.cache:
return
var
providers = self.cache.get(id).get()
trace "Removing provider from cache", id
providers.del(peerId)
self.cache.put(id, providers)
func drop*(self: var ProvidersCache, id: NodeId) =
## Drop all the providers for an entry
##
if self.disable:
return

View File

@ -1,11 +1,11 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.push raises: [Defect].}
import std/sequtils
import std/strutils

View File

@ -1,17 +1,15 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.push raises: [Defect].}
import std/options
import std/sequtils
from std/times import now, utc, toTime, toUnix
import pkg/stew/endians2
import pkg/chronos
import pkg/libp2p
import pkg/datastore
@ -23,13 +21,16 @@ import ./common
const
ExpiredCleanupBatch* = 1000
CleanupInterval* = 24.hours
CleanupInterval* = 5.minutes
proc cleanupExpired*(
store: Datastore,
batchSize = ExpiredCleanupBatch) {.async.} =
trace "Cleaning up expired records"
let
now = Moment.now()
let
q = Query.init(CidKey, limit = batchSize)
@ -46,13 +47,11 @@ proc cleanupExpired*(
var
keys = newSeq[Key]()
let
now = times.now().utc().toTime().toUnix()
for item in iter:
if (maybeKey, data) =? (await item) and key =? maybeKey:
if pair =? (await item) and pair.key.isSome:
let
expired = endians2.fromBytesBE(uint64, data).int64
(key, data) = (pair.key.get(), pair.data)
expired = Moment.init(uint64.fromBytesBE(data).int64, Microsecond)
if now >= expired:
trace "Found expired record", key
@ -75,7 +74,7 @@ proc cleanupOrphaned*(
trace "Cleaning up orphaned records"
let
providersQuery = Query.init(ProvidersKey, limit = batchSize, value = false)
providersQuery = Query.init(ProvidersKey, limit = batchSize)
block:
without iter =? (await store.query(providersQuery)), err:
@ -84,7 +83,7 @@ proc cleanupOrphaned*(
defer:
if not isNil(iter):
trace "Cleaning up orphaned query iterator"
trace "Cleaning up query iterator"
discard (await iter.dispose())
var count = 0
@ -93,7 +92,10 @@ proc cleanupOrphaned*(
trace "Batch cleaned up", size = batchSize
count.inc
if (maybeKey, _) =? (await item) and key =? maybeKey:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
without peerId =? key.fromProvKey(), err:
trace "Error extracting parts from cid key", key
continue
@ -102,17 +104,15 @@ proc cleanupOrphaned*(
trace "Error building cid key", err = err.msg
continue
without cidIter =? (await store.query(Query.init(cidKey, limit = 1, value = false))), err:
trace "Error querying key", cidKey, err = err.msg
without cidIter =? (await store.query(Query.init(cidKey, limit = 1))), err:
trace "Error querying key", cidKey
continue
let
res = block:
var count = 0
for item in cidIter:
if (key, _) =? (await item) and key.isSome:
count.inc
count
res = (await allFinished(toSeq(cidIter)))
.filterIt( it.completed )
.mapIt( it.read.get )
.filterIt( it.key.isSome ).len
if not isNil(cidIter):
trace "Disposing cid iter"

View File

@ -1,4 +1,4 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -7,18 +7,17 @@
import std/sequtils
import std/strutils
from std/times import now, utc, toTime, toUnix
import pkg/stew/endians2
import pkg/datastore
import pkg/chronos
import pkg/libp2p
import pkg/chronicles
import pkg/stew/results as rs
import pkg/stew/byteutils
import pkg/questionable
import pkg/questionable/results
{.push raises: [].}
{.push raises: [Defect].}
import ./maintenance
import ./cache
@ -57,30 +56,30 @@ proc getProvByKey*(self: ProvidersManager, key: Key): Future[?!SignedPeerRecord]
proc add*(
self: ProvidersManager,
id: NodeId,
cid: NodeId,
provider: SignedPeerRecord,
ttl = ZeroDuration): Future[?!void] {.async.} =
let
peerId = provider.data.peerId
trace "Adding provider to persistent store", id, peerId
trace "Adding provider to persistent store", cid, peerId
without provKey =? makeProviderKey(peerId), err:
trace "Error creating key from provider record", err = err.msg
return failure err.msg
without cidKey =? makeCidKey(id, peerId), err:
without cidKey =? makeCidKey(cid, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg
let
now = times.now().utc().toTime().toUnix()
expires =
if ttl > ZeroDuration:
ttl.seconds + now
ttl
else:
self.ttl.seconds + now
ttl = endians2.toBytesBE(expires.uint64)
Moment.fromNow(self.ttl) - ZeroMoment
ttl = expires.microseconds.uint64.toBytesBE
bytes: seq[byte] =
if existing =? (await self.getProvByKey(provKey)) and
@ -94,17 +93,17 @@ proc add*(
bytes
if bytes.len > 0:
trace "Adding or updating provider record", id, peerId
trace "Adding or updating provider record", cid, peerId
if err =? (await self.store.put(provKey, bytes)).errorOption:
trace "Unable to store provider with key", key = provKey, err = err.msg
trace "Adding or updating id", id, key = cidKey, ttl = expires.seconds
trace "Adding or updating cid", cid, key = cidKey, ttl = expires.minutes
if err =? (await self.store.put(cidKey, @ttl)).errorOption:
trace "Unable to store provider with key", key = cidKey, err = err.msg
return
self.cache.add(id, provider)
trace "Provider for id added", cidKey, provKey
self.cache.add(cid, provider)
trace "Provider for cid added", cidKey, provKey
return success()
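A minimal sketch of the expiry encoding used by the master-side add above and read back by cleanupExpired: a unix timestamp (now + ttl, in seconds) stored as 8 big-endian bytes. The 24 h TTL here is just an illustrative value:

import std/times
import pkg/stew/endians2

let
  now = times.now().utc().toTime().toUnix()
  ttlSeconds = 24'i64 * 60 * 60
  expires = (now + ttlSeconds).uint64
  stored = expires.toBytesBE          # 8-byte value written to the datastore

let decoded = uint64.fromBytesBE(stored).int64
doAssert decoded == now + ttlSeconds  # round-trips exactly
doAssert now < decoded                # not yet expired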
proc get*(
@ -137,10 +136,12 @@ proc get*(
trace "Cleaning up query iterator"
discard (await cidIter.dispose())
var keys: seq[Key]
for item in cidIter:
# TODO: =? doesn't support tuples
if (maybeKey, val) =? (await item) and key =? maybeKey:
if pair =? (await item) and pair.key.isSome:
let
(key, val) = (pair.key.get, pair.data)
without pairs =? key.fromCidKey() and
provKey =? makeProviderKey(pairs.peerId), err:
trace "Error creating key from provider record", err = err.msg
@ -149,24 +150,17 @@ proc get*(
trace "Querying provider key", key = provKey
without data =? (await self.store.get(provKey)):
trace "Error getting provider", key = provKey
keys.add(key)
continue
without provider =? SignedPeerRecord.decode(data).mapErr(mapFailure), err:
trace "Unable to decode provider from store", err = err.msg
keys.add(key)
continue
trace "Retrieved provider with key", key = provKey
providers.add(provider)
self.cache.add(id, provider)
trace "Deleting keys without provider from store", len = keys.len
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting records from persistent store", err = err.msg
return failure err
trace "Retrieved providers from persistent store", id = id, len = providers.len
trace "Retrieved providers from persistent store", cid = id, len = providers.len
return success providers
proc contains*(
@ -184,8 +178,8 @@ proc contains*(self: ProvidersManager, peerId: PeerId): Future[bool] {.async.} =
return (await self.store.has(provKey)) |? false
proc contains*(self: ProvidersManager, id: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $id), err:
proc contains*(self: ProvidersManager, cid: NodeId): Future[bool] {.async.} =
without cidKey =? (CidKey / $cid), err:
return false
let
@ -202,15 +196,15 @@ proc contains*(self: ProvidersManager, id: NodeId): Future[bool] {.async.} =
discard (await iter.dispose())
for item in iter:
if (key, _) =? (await item) and key.isSome:
if pair =? (await item) and pair.key.isSome:
return true
return false
proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =
proc remove*(self: ProvidersManager, cid: NodeId): Future[?!void] {.async.} =
self.cache.drop(id)
without cidKey =? (CidKey / $id), err:
self.cache.drop(cid)
without cidKey =? (CidKey / $cid), err:
return failure(err.msg)
let
@ -230,14 +224,16 @@ proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =
keys: seq[Key]
for item in iter:
if (maybeKey, _) =? (await item) and key =? maybeKey:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
keys.add(key)
without pairs =? key.fromCidKey, err:
trace "Unable to parse peer id from key", key
return failure err
self.cache.remove(id, pairs.peerId)
self.cache.remove(cid, pairs.peerId)
trace "Deleted record from store", key
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
@ -246,60 +242,57 @@ proc remove*(self: ProvidersManager, id: NodeId): Future[?!void] {.async.} =
return success()
proc remove*(
self: ProvidersManager,
peerId: PeerId,
entries = false): Future[?!void] {.async.} =
if entries:
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err
let
q = Query.init(cidKey)
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())
var
keys: seq[Key]
for item in iter:
if (maybeKey, _) =? (await item) and key =? maybeKey:
keys.add(key)
let
parts = key.id.split(datastore.Separator)
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err
trace "Deleted records from store"
without provKey =? peerId.makeProviderKey, err:
proc remove*(self: ProvidersManager, peerId: PeerId): Future[?!void] {.async.} =
without cidKey =? (CidKey / "*" / $peerId), err:
return failure err
trace "Removing provider from cache", peerId
self.cache.remove(peerId)
let
q = Query.init(cidKey)
block:
without iter =? (await self.store.query(q)), err:
trace "Unable to obtain record for key", key = cidKey
return failure err
defer:
if not isNil(iter):
trace "Cleaning up query iterator"
discard (await iter.dispose())
var
keys: seq[Key]
for item in iter:
if pair =? (await item) and pair.key.isSome:
let
key = pair.key.get()
keys.add(key)
let
parts = key.id.split(datastore.Separator)
self.cache.remove(NodeId.fromHex(parts[2]), peerId)
if keys.len > 0 and err =? (await self.store.delete(keys)).errorOption:
trace "Error deleting record from persistent store", err = err.msg
return failure err
trace "Deleted records from store"
without provKey =? makeProviderKey(peerId), err:
return failure err
trace "Removing provider record", key = provKey
return (await self.store.delete(provKey))
proc remove*(
self: ProvidersManager,
id: NodeId,
cid: NodeId,
peerId: PeerId): Future[?!void] {.async.} =
self.cache.remove(id, peerId)
without cidKey =? makeCidKey(id, peerId), err:
self.cache.remove(cid, peerId)
without cidKey =? makeCidKey(cid, peerId), err:
trace "Error creating key from content id", err = err.msg
return failure err.msg

View File

@ -1,26 +1,21 @@
# logos-storage-dht - Logos Storage DHT
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.push raises: [Defect].}
import
std/[algorithm, net, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos,
std/[algorithm, times, sequtils, bitops, sets, options, tables],
stint, chronicles, metrics, bearssl/rand, chronos, stew/shims/net as stewNet,
"."/[node, random2, spr]
export options
declarePublicGauge dht_routing_table_nodes,
declarePublicGauge routing_table_nodes,
"Discovery routing table nodes", labels = ["state"]
declarePublicGauge dht_routing_table_buckets,
"Discovery routing table: number of buckets"
logScope:
topics = "discv5 routingtable"
type
DistanceProc* = proc(a, b: NodeId): NodeId {.raises: [Defect], gcsafe, noSideEffect.}
@ -34,7 +29,7 @@ type
IpLimits* = object
limit*: uint
ips: Table[IpAddress, uint]
ips: Table[ValidIpAddress, uint]
RoutingTable* = object
@ -101,7 +96,7 @@ type
ReplacementExisting
NoAddress
func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
func inc*(ipLimits: var IpLimits, ip: ValidIpAddress): bool =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val < ipLimits.limit:
ipLimits.ips[ip] = val + 1
@ -109,7 +104,7 @@ func inc*(ipLimits: var IpLimits, ip: IpAddress): bool =
else:
false
func dec*(ipLimits: var IpLimits, ip: IpAddress) =
func dec*(ipLimits: var IpLimits, ip: ValidIpAddress) =
let val = ipLimits.ips.getOrDefault(ip, 0)
if val == 1:
ipLimits.ips.del(ip)
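A standalone sketch of the per-IP counting behind IpLimits above, with a plain string key standing in for the IP address type:

import std/tables

type IpCounter = object
  limit: uint
  ips: Table[string, uint]

func inc(c: var IpCounter, ip: string): bool =
  let val = c.ips.getOrDefault(ip, 0)
  if val < c.limit:
    c.ips[ip] = val + 1
    true
  else:
    false

func dec(c: var IpCounter, ip: string) =
  let val = c.ips.getOrDefault(ip, 0)
  if val == 1:
    c.ips.del(ip)
  elif val > 1:
    c.ips[ip] = val - 1

var limits = IpCounter(limit: 2)
doAssert limits.inc("10.0.0.1")
doAssert limits.inc("10.0.0.1")
doAssert not limits.inc("10.0.0.1")  # third node on the same IP hits the limit
limits.dec("10.0.0.1")
doAssert limits.inc("10.0.0.1")      # room again after one node left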
@ -182,8 +177,6 @@ proc midpoint(k: KBucket): NodeId =
proc len(k: KBucket): int = k.nodes.len
proc replacementLen*(k: KBucket): int = k.replacementCache.len
proc tail(k: KBucket): Node = k.nodes[high(k.nodes)]
proc ipLimitInc(r: var RoutingTable, b: KBucket, n: Node): bool =
@ -212,14 +205,14 @@ proc ipLimitDec(r: var RoutingTable, b: KBucket, n: Node) =
proc add(k: KBucket, n: Node) =
k.nodes.add(n)
dht_routing_table_nodes.inc()
routing_table_nodes.inc()
proc remove(k: KBucket, n: Node): bool =
let i = k.nodes.find(n)
if i != -1:
dht_routing_table_nodes.dec()
if alreadySeen(k.nodes[i]):
dht_routing_table_nodes.dec(labelValues = ["seen"])
routing_table_nodes.dec()
if k.nodes[i].seen:
routing_table_nodes.dec(labelValues = ["seen"])
k.nodes.delete(i)
trace "removed node:", node = n
true
@ -285,15 +278,11 @@ proc computeSharedPrefixBits(nodes: openArray[NodeId]): int =
# Reaching this would mean that all node ids are equal.
doAssert(false, "Unable to calculate number of shared prefix bits")
proc getDepth*(b: KBucket) : int =
computeSharedPrefixBits(@[b.istart, b.iend])
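getDepth above is just the number of leading bits shared by the bucket's whole id range. A toy 8-bit illustration of the same idea (the real computeSharedPrefixBits works on 256-bit NodeIds):

func sharedPrefixBits(a, b: uint8): int =
  for i in countdown(7, 0):
    if ((a shr i) and 1) != ((b shr i) and 1):
      return 7 - i
  8   # identical ids share all bits

doAssert sharedPrefixBits(0b1010_0000, 0b1011_1111) == 3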
proc init*(T: type RoutingTable, localNode: Node, bitsPerHop = DefaultBitsPerHop,
ipLimits = DefaultTableIpLimits, rng: ref HmacDrbgContext,
distanceCalculator = XorDistanceCalculator): T =
## Initialize the routing table for provided `Node` and bitsPerHop value.
## `bitsPerHop` is default set to 5 as recommended by original Kademlia paper.
dht_routing_table_buckets.inc()
RoutingTable(
localNode: localNode,
buckets: @[KBucket.new(0.u256, high(UInt256), ipLimits.bucketIpLimit)],
@ -307,7 +296,6 @@ proc splitBucket(r: var RoutingTable, index: int) =
let (a, b) = bucket.split()
r.buckets[index] = a
r.buckets.insert(b, index + 1)
dht_routing_table_buckets.inc()
proc bucketForNode(r: RoutingTable, id: NodeId): KBucket =
result = binaryGetBucketForNode(r.buckets, id)
@ -329,12 +317,15 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
# gets moved to the tail.
if k.replacementCache[nodeIdx].address.get().ip != n.address.get().ip:
if not ipLimitInc(r, k, n):
trace "replace: ip limit reached"
return IpLimitReached
ipLimitDec(r, k, k.replacementCache[nodeIdx])
k.replacementCache.delete(nodeIdx)
k.replacementCache.add(n)
trace "replace: already existed"
return ReplacementExisting
elif not ipLimitInc(r, k, n):
trace "replace: ip limit reached (2)"
return IpLimitReached
else:
doAssert(k.replacementCache.len <= REPLACEMENT_CACHE_SIZE)
@ -345,7 +336,7 @@ proc addReplacement(r: var RoutingTable, k: KBucket, n: Node): NodeStatus =
k.replacementCache.delete(0)
k.replacementCache.add(n)
debug "Node added to replacement cache", n
trace "replace: added"
return ReplacementAdded
proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
@ -412,50 +403,42 @@ proc addNode*(r: var RoutingTable, n: Node): NodeStatus =
return IpLimitReached
bucket.add(n)
debug "Node added to routing table", n
return Added
else:
# Bucket must be full, but let's see if the bucket should be split.
# Bucket must be full, but let's see if the bucket should be split.
# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding
# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)
# Calculate the prefix shared by all nodes in the bucket's range, not the
# ones actually in the bucket.
let depth = computeSharedPrefixBits(@[bucket.istart, bucket.iend])
# Split if the bucket has the local node in its range or if the depth is not
# congruent to 0 mod `bitsPerHop`
if bucket.inRange(r.localNode) or
(depth mod r.bitsPerHop != 0 and depth != ID_SIZE):
r.splitBucket(r.buckets.find(bucket))
return r.addNode(n) # retry adding
else:
# When bucket doesn't get split the node is added to the replacement cache
return r.addReplacement(bucket, n)
proc removeNode*(r: var RoutingTable, n: Node) =
## Remove the node `n` from the routing table.
## No replacement is added, even if there is one in the replacement cache.
let b = r.bucketForNode(n.id)
if b.remove(n):
ipLimitDec(r, b, n)
proc replaceNode*(r: var RoutingTable, n: Node, forceRemoveBelow = 1.0) =
proc replaceNode*(r: var RoutingTable, n: Node) =
## Replace node `n` with last entry in the replacement cache. If there are
## no entries in the replacement cache, node `n` will either be removed
## or kept based on `forceRemoveBelow`. Default: remove.
## Note: the Kademlia paper recommends not removing nodes here if there are no
## replacements. This might mean pinging nodes that are not reachable, but it
## also avoids being too aggressive, because of UDP losses or temporary network
## failures.
## no entries in the replacement cache, node `n` will simply be removed.
# TODO: the Kademlia paper recommends not removing nodes here if there are no
# replacements. However, that would require a bit more complexity in the
# revalidation as you don't want to try pinging that node all the time.
let b = r.bucketForNode(n.id)
if (b.replacementCache.len > 0 or n.seen <= forceRemoveBelow):
if b.remove(n):
debug "Node removed from routing table", n
ipLimitDec(r, b, n)
if b.remove(n):
ipLimitDec(r, b, n)
if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
let rn = b.replacementCache[high(b.replacementCache)]
b.add(rn)
b.replacementCache.delete(high(b.replacementCache))
debug "Node added to routing table from replacement cache", node=rn
if b.replacementCache.len > 0:
# Nodes in the replacement cache are already included in the ip limits.
b.add(b.replacementCache[high(b.replacementCache)])
b.replacementCache.delete(high(b.replacementCache))
proc getNode*(r: RoutingTable, id: NodeId): Option[Node] =
## Get the `Node` with `id` as `NodeId` from the routing table.
@ -476,16 +459,16 @@ proc nodesByDistanceTo(r: RoutingTable, k: KBucket, id: NodeId): seq[Node] =
sortedByIt(k.nodes, r.distance(it.id, id))
proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
seenThreshold = 0.0): seq[Node] =
seenOnly = false): seq[Node] =
## Return up to k neighbours of the given node id.
## When seenThreshold is set, only nodes that were previously contacted
## successfully and whose 'seen' score is at or above the threshold are selected.
## When seenOnly is set to true, only nodes that have been contacted
## previously successfully will be selected.
result = newSeqOfCap[Node](k * 2)
block addNodes:
for bucket in r.bucketsByDistanceTo(id):
for n in r.nodesByDistanceTo(bucket, id):
# Avoid nodes with 'seen' value below threshold
if n.seen >= seenThreshold:
# Only provide actively seen nodes when `seenOnly` set.
if not seenOnly or n.seen:
result.add(n)
if result.len == k * 2:
break addNodes
@ -497,22 +480,22 @@ proc neighbours*(r: RoutingTable, id: NodeId, k: int = BUCKET_SIZE,
result.setLen(k)
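Stripped of buckets and thresholds, the selection in neighbours above amounts to "sort the candidates by distance to the id and keep the k closest". A toy version on small unsigned ints, with XOR standing in for the table's configurable distance calculator:

import std/algorithm

proc closest(candidates: seq[uint], target: uint, k: int): seq[uint] =
  result = candidates.sortedByIt(it xor target)
  if result.len > k:
    result.setLen(k)

doAssert closest(@[1'u, 4, 7, 12, 15], 5, 3) == @[4'u, 7, 1]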
proc neighboursAtDistance*(r: RoutingTable, distance: uint16,
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
## Return up to k neighbours at given logarithmic distance.
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenThreshold)
result = r.neighbours(r.idAtDistance(r.localNode.id, distance), k, seenOnly)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distance.
keepIf(result, proc(n: Node): bool = r.logDistance(n.id, r.localNode.id) == distance)
proc neighboursAtDistances*(r: RoutingTable, distances: seq[uint16],
k: int = BUCKET_SIZE, seenThreshold = 0.0): seq[Node] =
k: int = BUCKET_SIZE, seenOnly = false): seq[Node] =
## Return up to k neighbours at given logarithmic distances.
# TODO: This currently returns nodes at neighbouring distances, prioritizing the
# first one. It might end up not including all of the requested node distances.
# Need to rework the logic here and not use the neighbours call.
if distances.len > 0:
result = r.neighbours(r.idAtDistance(r.localNode.id, distances[0]), k,
seenThreshold)
seenOnly)
# This is a bit silly, first getting closest nodes then to only keep the ones
# that are exactly the requested distances.
keepIf(result, proc(n: Node): bool =
@ -524,30 +507,23 @@ proc len*(r: RoutingTable): int =
proc moveRight[T](arr: var openArray[T], a, b: int) =
## In `arr` move elements in range [a, b] right by 1.
var t: T
when declared(shallowCopy):
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
else:
t = move arr[b + 1]
for i in countdown(b, a):
arr[i + 1] = move arr[i]
arr[a] = move t
shallowCopy(t, arr[b + 1])
for i in countdown(b, a):
shallowCopy(arr[i + 1], arr[i])
shallowCopy(arr[a], t)
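Worked example of the moveRight semantics above: the element just past the range wraps to position a and everything in [a, b] shifts right by one, which is how setJustSeen moves a node to the head of its bucket. An illustrative copy (the module's own proc is private):

proc moveRightDemo[T](arr: var openArray[T], a, b: int) =
  var t = arr[b + 1]
  for i in countdown(b, a):
    arr[i + 1] = arr[i]
  arr[a] = t

var nodes = @["n0", "n1", "n2", "n3", "n4"]
nodes.moveRightDemo(0, 2)   # as called by setJustSeen with idx = 3
doAssert nodes == @["n3", "n0", "n1", "n2", "n4"]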
proc setJustSeen*(r: RoutingTable, n: Node, seen = true) =
## If seen, move `n` to the head (most recently seen) of its bucket.
proc setJustSeen*(r: RoutingTable, n: Node) =
## Move `n` to the head (most recently seen) of its bucket.
## If `n` is not in the routing table, do nothing.
let b = r.bucketForNode(n.id)
if seen:
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)
let idx = b.nodes.find(n)
if idx >= 0:
if idx != 0:
b.nodes.moveRight(0, idx - 1)
if not alreadySeen(n): # first time seeing the node
dht_routing_table_nodes.inc(labelValues = ["seen"])
n.registerSeen(seen)
if not n.seen:
b.nodes[0].seen = true
routing_table_nodes.inc(labelValues = ["seen"])
proc nodeToRevalidate*(r: RoutingTable): Node =
## Return a node to revalidate. The least recently seen node from a random
@ -561,7 +537,7 @@ proc nodeToRevalidate*(r: RoutingTable): Node =
return b.nodes[^1]
proc randomNodes*(r: RoutingTable, maxAmount: int,
pred: proc(x: Node): bool {.gcsafe, noSideEffect, raises: [].} = nil): seq[Node] =
pred: proc(x: Node): bool {.gcsafe, noSideEffect.} = nil): seq[Node] =
## Get a `maxAmount` of random nodes from the routing table with the `pred`
## predicate function applied as filter on the nodes selected.
var maxAmount = maxAmount
@ -584,8 +560,7 @@ proc randomNodes*(r: RoutingTable, maxAmount: int,
# while it will take less total time compared to e.g. an (async)
# randomLookup, the time might be wasted as all nodes are possibly seen
# already.
# We check against the number of nodes to avoid an infinite loop in case of a filter.
while len(result) < maxAmount and len(seen) < sz:
while len(seen) < maxAmount:
let bucket = r.rng[].sample(r.buckets)
if bucket.nodes.len != 0:
let node = r.rng[].sample(bucket.nodes)

View File

@ -0,0 +1,62 @@
# codex-dht - Codex DHT
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
#
## Session cache as mentioned at
## https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md#session-cache
##
{.push raises: [Defect].}
import
std/options,
stint, stew/endians2, stew/shims/net,
node, lru
export lru
const
aesKeySize* = 128 div 8
keySize = sizeof(NodeId) +
16 + # max size of ip address (ipv6)
2 # Sizeof port
type
AesKey* = array[aesKeySize, byte]
SessionKey* = array[keySize, byte]
SessionValue* = array[sizeof(AesKey) + sizeof(AesKey), byte]
Sessions* = LRUCache[SessionKey, SessionValue]
func makeKey(id: NodeId, address: Address): SessionKey =
var pos = 0
result[pos ..< pos+sizeof(id)] = toBytes(id)
pos.inc(sizeof(id))
case address.ip.family
of IpAddressFamily.IpV4:
result[pos ..< pos+sizeof(address.ip.address_v4)] = address.ip.address_v4
of IpAddressFamily.IpV6:
result[pos ..< pos+sizeof(address.ip.address_v6)] = address.ip.address_v6
pos.inc(sizeof(address.ip.address_v6))
result[pos ..< pos+sizeof(address.port)] = toBytes(address.port.uint16)
func store*(s: var Sessions, id: NodeId, address: Address, r, w: AesKey) =
var value: array[sizeof(r) + sizeof(w), byte]
value[0 .. 15] = r
value[16 .. ^1] = w
s.put(makeKey(id, address), value)
func load*(s: var Sessions, id: NodeId, address: Address, r, w: var AesKey): bool =
let res = s.get(makeKey(id, address))
if res.isSome():
let val = res.get()
copyMem(addr r[0], unsafeAddr val[0], sizeof(r))
copyMem(addr w[0], unsafeAddr val[sizeof(r)], sizeof(w))
return true
else:
return false
func del*(s: var Sessions, id: NodeId, address: Address) =
s.del(makeKey(id, address))
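A toy round-trip of the value layout used by store/load above: the read and write AES keys packed back to back into one 32-byte entry. Plain arrays stand in for the LRU cache and the module's AesKey type:

const aesKeySize = 16
type AesKey = array[aesKeySize, byte]

proc pack(r, w: AesKey): array[2 * aesKeySize, byte] =
  result[0 .. 15] = r
  result[16 .. ^1] = w

proc unpack(value: array[2 * aesKeySize, byte]): tuple[r, w: AesKey] =
  copyMem(addr result.r[0], unsafeAddr value[0], aesKeySize)
  copyMem(addr result.w[0], unsafeAddr value[aesKeySize], aesKeySize)

var r, w: AesKey
r[0] = 0xaa
w[0] = 0xbb
let (r2, w2) = unpack(pack(r, w))
doAssert r2 == r and w2 == w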

View File

@ -6,10 +6,10 @@
#
import
chronicles,
results,
std/[net, options, strutils, sugar],
pkg/stew/[byteutils, arrayops],
std/[options, strutils, sugar],
pkg/stew/[results, byteutils, arrayops],
stew/endians2,
stew/shims/net,
stew/base64,
libp2p/crypto/crypto,
libp2p/crypto/secp,
@ -58,7 +58,7 @@ proc incSeqNo*(
proc update*(
r: var SignedPeerRecord,
pk: crypto.PrivateKey,
ip: Option[IpAddress],
ip: Option[ValidIpAddress],
tcpPort, udpPort: Option[Port] = none[Port]()):
RecordResult[void] =
## Update a `SignedPeerRecord` with given ip address, tcp port, udp port and optional
@ -97,8 +97,9 @@ proc update*(
if udpPort.isNone and tcpPort.isNone:
return err "No existing address in SignedPeerRecord with no port provided"
let ipAddr = ip.get
let ipAddr = try: ValidIpAddress.init(ip.get)
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
if tcpPort.isSome:
transProto = IpTransportProtocol.tcpProtocol
transProtoPort = tcpPort.get
@ -122,13 +123,9 @@ proc update*(
.mapErr((e: string) => e.cstring)
existingIp =
if existingNetProtoFam == MultiCodec.codec("ip6"):
IpAddress(
family: IPv6, address_v6: array[16, byte].initCopyFrom(existingNetProtoAddr)
)
ipv6 array[16, byte].initCopyFrom(existingNetProtoAddr)
else:
IpAddress(
family: IPv4, address_v4: array[4, byte].initCopyFrom(existingNetProtoAddr)
)
ipv4 array[4, byte].initCopyFrom(existingNetProtoAddr)
ipAddr = ip.get(existingIp)
@ -226,7 +223,7 @@ proc init*(
T: type SignedPeerRecord,
seqNum: uint64,
pk: PrivateKey,
ip: Option[IpAddress],
ip: Option[ValidIpAddress],
tcpPort, udpPort: Option[Port]):
RecordResult[T] =
## Initialize a `SignedPeerRecord` with given sequence number, private key, optional
@ -241,7 +238,9 @@ proc init*(
tcpPort, udpPort
var
ipAddr = static parseIpAddress("127.0.0.1")
ipAddr = try: ValidIpAddress.init("127.0.0.1")
except ValueError as e:
return err ("Existing address contains invalid address: " & $e.msg).cstring
proto: IpTransportProtocol
protoPort: Port

View File

@ -0,0 +1,217 @@
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Everything below deals with the handling of ordinary messages
import
std/[tables, options],
bearssl/rand,
chronos,
chronicles,
libp2p/crypto/crypto,
stew/shims/net,
"."/[node, encoding, sessions]
const
handshakeTimeout* = 2.seconds ## timeout for the reply on the
## whoareyou message
responseTimeout* = 4.seconds ## timeout for the response of a request-response
## call
type
Transport* [Client] = ref object
client: Client
bindAddress: Address ## UDP binding address
transp: DatagramTransport
pendingRequests: Table[AESGCMNonce, PendingRequest]
codec*: Codec
rng: ref HmacDrbgContext
PendingRequest = object
node: Node
message: seq[byte]
proc sendToA(t: Transport, a: Address, data: seq[byte]) =
let ta = initTAddress(a.ip, a.port)
let f = t.transp.sendTo(ta, data)
f.callback = proc(data: pointer) {.gcsafe.} =
if f.failed:
# Could be `TransportUseClosedError` in case the transport is already
# closed, or could be `TransportOsError` in case of a socket error.
# In the latter case this would probably mostly occur if the network
# interface underneath gets disconnected or similar.
# TODO: Should this kind of error be propagated upwards? Probably, but
# it should not stop the process as that would reset the discovery
# progress in case there is even a small window of no connection.
# One case that needs this error available upwards is when revalidating
# nodes. Else the revalidation might end up clearing the routing table
# because of ping failures due to our own network connection failure.
warn "Discovery send failed", msg = f.readError.msg
proc send(t: Transport, n: Node, data: seq[byte]) =
doAssert(n.address.isSome())
t.sendToA(n.address.get(), data)
proc sendMessage*(t: Transport, toId: NodeId, toAddr: Address, message: seq[byte]) =
let (data, _) = encodeMessagePacket(t.rng[], t.codec, toId, toAddr,
message)
t.sendToA(toAddr, data)
# TODO: This could be improved to do the clean-up immediately when a
# non-whoareyou response arrives, but we would need to store the AuthTag
# somewhere
proc registerRequest(t: Transport, n: Node, message: seq[byte],
nonce: AESGCMNonce) =
let request = PendingRequest(node: n, message: message)
if not t.pendingRequests.hasKeyOrPut(nonce, request):
sleepAsync(responseTimeout).addCallback() do(data: pointer):
t.pendingRequests.del(nonce)
## TODO: remove the dependence on `message`; this should be handled at a higher layer
proc sendMessage*(t: Transport, toNode: Node, message: seq[byte]) =
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let (data, nonce) = encodeMessagePacket(t.rng[], t.codec,
toNode.id, address, message)
t.registerRequest(toNode, message, nonce)
t.send(toNode, data)
proc sendWhoareyou(t: Transport, toId: NodeId, a: Address,
requestNonce: AESGCMNonce, node: Option[Node]) =
let key = HandshakeKey(nodeId: toId, address: a)
if not t.codec.hasHandshake(key):
let
recordSeq = if node.isSome(): node.get().record.seqNum
else: 0
pubkey = if node.isSome(): some(node.get().pubkey)
else: none(PublicKey)
let data = encodeWhoareyouPacket(t.rng[], t.codec, toId, a, requestNonce,
recordSeq, pubkey)
sleepAsync(handshakeTimeout).addCallback() do(data: pointer):
# TODO: should we still provide cancellation in case handshake completes
# correctly?
t.codec.handshakes.del(key)
trace "Send whoareyou", dstId = toId, address = a
t.sendToA(a, data)
else:
debug "Node with this id already has ongoing handshake, ignoring packet"
proc receive*(t: Transport, a: Address, packet: openArray[byte]) =
let decoded = t.codec.decodePacket(a, packet)
if decoded.isOk:
let packet = decoded[]
case packet.flag
of OrdinaryMessage:
if packet.messageOpt.isSome():
let message = packet.messageOpt.get()
trace "Received message packet", srcId = packet.srcId, address = a,
kind = message.kind, p = $packet
t.client.handleMessage(packet.srcId, a, message)
else:
trace "Not decryptable message packet received",
srcId = packet.srcId, address = a
t.sendWhoareyou(packet.srcId, a, packet.requestNonce,
t.client.getNode(packet.srcId))
of Flag.Whoareyou:
trace "Received whoareyou packet", address = a
var pr: PendingRequest
if t.pendingRequests.take(packet.whoareyou.requestNonce, pr):
let toNode = pr.node
# This is a node we previously contacted and thus must have an address.
doAssert(toNode.address.isSome())
let address = toNode.address.get()
let data = encodeHandshakePacket(
t.rng[],
t.codec,
toNode.id,
address,
pr.message,
packet.whoareyou,
toNode.pubkey
).expect("Valid handshake packet to encode")
trace "Send handshake message packet", dstId = toNode.id, address
t.send(toNode, data)
else:
debug "Timed out or unrequested whoareyou packet", address = a
of HandshakeMessage:
trace "Received handshake message packet", srcId = packet.srcIdHs,
address = a, kind = packet.message.kind
t.client.handleMessage(packet.srcIdHs, a, packet.message)
# For a handshake message it is possible that we received a newer SPR.
# In that case we can add/update it to the routing table.
if packet.node.isSome():
let node = packet.node.get()
# Let's not add nodes without a correct IP in the SPR to the routing table.
# The SPR could contain bogus IPs and, although they would get removed
# on the next revalidation, one could spam these, as the handshake
# message occurs on (first) incoming messages.
if node.address.isSome() and a == node.address.get():
# TODO: maybe here we could verify that the address matches what we were
# sending the 'whoareyou' message to. In that case, we can set 'seen'
node.seen = true
if t.client.addNode(node):
trace "Added new node to routing table after handshake", node
else:
trace "Packet decoding error", error = decoded.error, address = a
proc processClient[T](transp: DatagramTransport, raddr: TransportAddress):
Future[void] {.async.} =
let t = getUserData[Transport[T]](transp)
# TODO: should we use `peekMessage()` to avoid allocation?
let buf = try: transp.getMessage()
except TransportOsError as e:
# This is likely due to local network connection issues.
warn "Transport getMessage", exception = e.name, msg = e.msg
return
let ip = try: raddr.address()
except ValueError as e:
error "Not a valid IpAddress", exception = e.name, msg = e.msg
return
let a = Address(ip: ValidIpAddress.init(ip), port: raddr.port)
t.receive(a, buf)
proc open*[T](t: Transport[T]) {.raises: [Defect, CatchableError].} =
info "Starting transport", bindAddress = t.bindAddress
# TODO allow binding to specific IP / IPv6 / etc
let ta = initTAddress(t.bindAddress.ip, t.bindAddress.port)
t.transp = newDatagramTransport(processClient[T], udata = t, local = ta)
proc close*(t: Transport) =
t.transp.close
proc closed*(t: Transport) : bool =
t.transp.closed
proc closeWait*(t: Transport) {.async.} =
await t.transp.closeWait
proc newTransport*[T](
client: T,
privKey: PrivateKey,
localNode: Node,
bindPort: Port,
bindIp = IPv4_any(),
rng = newRng()): Transport[T]=
# TODO Consider whether this should be a Defect
doAssert rng != nil, "RNG initialization failed"
Transport[T](
client: client,
bindAddress: Address(ip: ValidIpAddress.init(bindIp), port: bindPort),
codec: Codec(
localNode: localNode,
privKey: privKey,
sessions: Sessions.init(256)),
rng: rng)
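For orientation, a hedged usage sketch of the transport defined in this file; `client`, `privKey`, `localNode`, `toNode` and `msg` are placeholders for objects constructed elsewhere in the library, and the port is arbitrary:
# Sketch only: wire up a Transport, send one message, then shut it down.
let transport = newTransport(client, privKey, localNode, bindPort = Port(30303))
transport.open()                     # binds the UDP datagram transport
transport.sendMessage(toNode, msg)   # registers a pending request and sends the packet
# incoming datagrams are dispatched via processClient to receive()
waitFor transport.closeWait()        # release the socket when done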

335
nimble.lock Normal file
View File

@ -0,0 +1,335 @@
{
"version": 2,
"packages": {
"nim": {
"version": "1.6.14",
"vcsRevision": "71ba2e7f3c5815d956b1ae0341b0743242b8fec6",
"url": "https://github.com/nim-lang/Nim.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f9ce6fa986a4e75514fe26d4c773789b8897eb18"
}
},
"unittest2": {
"version": "0.0.2",
"vcsRevision": "02c49b8a994dd3f9eddfaab45262f9b8fa507f8e",
"url": "https://github.com/status-im/nim-unittest2.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a7f3331cabb5fad0d04c93be0aad1f020f9c8033"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "e18f5a62af2ade7a1fd1d39635d4e04d944def08",
"url": "https://github.com/status-im/nim-stew.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "2a80972f66597bf87d820dca8164d89d3bb24c6d"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "a5742a9a214ac33f91615f3862c7b099aec43b00",
"url": "https://github.com/cheatfate/nimcrypto.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "f76c87707cd4e96355b8bb6ef27e7f8b0aac1e08"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "5340cf188168d6afcafc8023770d880f067c0b2f",
"url": "https://github.com/status-im/nim-secp256k1.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "ae9cbea4487be94a06653ffee075a7f1bd1e231e"
}
},
"bearssl": {
"version": "0.1.5",
"vcsRevision": "f4c4233de453cb7eac0ce3f3ffad6496295f83ab",
"url": "https://github.com/status-im/nim-bearssl.git",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "dabf4aaac8969fb10281ebd9ff51875d37eeaaa9"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "e88e231dfcef4585fe3b2fbd9b664dbd28a88040",
"url": "https://github.com/status-im/nim-http-utils.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "dd0dcef76616ad35922944671c49222c8a17fb1f"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "6525f4ce1d1a7eba146e5f1a53f6f105077ae686",
"url": "https://github.com/status-im/nim-chronos.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "8cdf821ecc76fb91fdfb5191cad31f813822fcb2"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "743f81d4f6c6ebf0ac02389f2392ff8b4235bee5",
"url": "https://github.com/status-im/nim-metrics.git",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "6274c7ae424b871bc21ca3a6b6713971ff6a8095"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "1b561a9e71b6bdad1c1cdff753418906037e9d09",
"url": "https://github.com/status-im/nim-faststreams.git",
"downloadMethod": "git",
"dependencies": [
"stew",
"testutils",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "97edf9797924af48566a0af8267203dc21d80c77"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "493d18b8292fc03aa4f835fd825dea1183f97466",
"url": "https://github.com/status-im/nim-serialization.git",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "893921d41eb4e90a635442f02dd17b5f90bcbb00"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "e5b18fb710c3d0167ec79f3b892f5a7a1bc6d1a4",
"url": "https://github.com/status-im/nim-json-serialization.git",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "bdcdeefca4e2b31710a23cc817aa6abfa0d041e2"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "7631f7b2ee03398cb1512a79923264e8f9410af6",
"url": "https://github.com/status-im/nim-chronicles.git",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "2b6795cc40a687d3716b617e70d96e5af361c4af"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "74cdeb54b21bededb5a515d36f608bc1850555a2",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "01d330dc4c1924e56b1559ee73bc760e526f635c"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "7b2ed397d6e4c37ea4df08ae82aeac7ff04cd180",
"url": "https://github.com/status-im/nim-websock.git",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "d27f126527be59f5a0dc35303cb37b82d4e2770b"
}
},
"libp2p": {
"version": "1.0.0",
"vcsRevision": "a3e9d1ed80c048cd5abc839cbe0863cefcedc702",
"url": "https://github.com/status-im/nim-libp2p.git",
"downloadMethod": "git",
"dependencies": [
"nimcrypto",
"dnsclient",
"bearssl",
"chronicles",
"chronos",
"metrics",
"secp256k1",
"stew",
"websock"
],
"checksums": {
"sha1": "65e473566f19f7f9a3529745e7181fb58d30b5ef"
}
},
"combparser": {
"version": "0.2.0",
"vcsRevision": "ba4464c005d7617c008e2ed2ebc1ba52feb469c6",
"url": "https://github.com/PMunch/combparser.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a3635260961a893b88f69aac19f1b24e032a7e97"
}
},
"asynctest": {
"version": "0.3.2",
"vcsRevision": "a236a5f0f3031573ac2cb082b63dbf6e170e06e7",
"url": "https://github.com/status-im/asynctest.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "0ef50d086659835b0a23a4beb77cb11747695448"
}
},
"questionable": {
"version": "0.10.6",
"vcsRevision": "30e4184a99c8c1ba329925912d2c5d4b09acf8cc",
"url": "https://github.com/status-im/questionable.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "ca2d1e2e0be6566b4bf13261b29645721d01673d"
}
},
"upraises": {
"version": "0.1.0",
"vcsRevision": "ff4f8108e44fba9b35cac535ab63d3927e8fd3c2",
"url": "https://github.com/markspanbroek/upraises.git",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a0243c8039e12d547dbb2e9c73789c16bb8bc956"
}
},
"sqlite3_abi": {
"version": "3.40.1.1",
"vcsRevision": "362e1bd9f689ad9f5380d9d27f0705b3d4dfc7d3",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "8e91db8156a82383d9c48f53b33e48f4e93077b1"
}
},
"protobuf_serialization": {
"version": "0.2.0",
"vcsRevision": "27b400fdf3bd8ce7120ca66fc1de39d3f1a5804a",
"url": "https://github.com/status-im/nim-protobuf-serialization",
"downloadMethod": "git",
"dependencies": [
"stew",
"faststreams",
"serialization",
"combparser"
],
"checksums": {
"sha1": "9c30c45b92900b425b147aeceae87bee6295dd80"
}
},
"datastore": {
"version": "0.0.1",
"vcsRevision": "0cde8aeb67c59fd0ac95496dc6b5e1168d6632aa",
"url": "https://github.com/status-im/nim-datastore",
"downloadMethod": "git",
"dependencies": [
"asynctest",
"chronos",
"questionable",
"sqlite3_abi",
"stew",
"unittest2",
"upraises"
],
"checksums": {
"sha1": "2c03bb47de97962d2a64be1ed0a8161cd9d65159"
}
},
"stint": {
"version": "0.0.1",
"vcsRevision": "036c71d06a6b22f8f967ba9d54afd2189c3872ca",
"url": "https://github.com/status-im/nim-stint",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "0f187a2115315ca898e5f9a30c5e506cf6057062"
}
}
},
"tasks": {}
}

View File

@ -1,73 +0,0 @@
import std / [os, strutils, sequtils]
task testAll, "Run DHT tests":
exec "nim c -r test.nim"
rmFile "./test"
task compileParallelTests, "Compile parallel tests":
exec "nim c --hints:off --verbosity:0 dht/test_providers.nim"
exec "nim c --hints:off --verbosity:0 dht/test_providermngr.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5.nim"
exec "nim c --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim"
task test, "Run DHT tests":
# compile with trace logging to make sure it doesn't crash
exec "nim c -d:testsAll -d:chronicles_enabled=on -d:chronicles_log_level=TRACE test.nim"
rmFile "./test"
compileParallelTestsTask()
exec "nim c -r -d:testsAll --verbosity:0 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart1, "Run DHT tests A":
compileParallelTestsTask()
exec "nim c -r -d:testsPart1 testAllParallel.nim"
rmFile "./testAllParallel"
task testPart2, "Run DHT tests B":
compileParallelTestsTask()
exec "nim c -r -d:testsPart2 testAllParallel.nim"
rmFile "./testAllParallel"
task coverage, "generates code coverage report":
var (output, exitCode) = gorgeEx("which lcov")
if exitCode != 0:
echo ""
echo " ************************** ⛔️ ERROR ⛔️ **************************"
echo " ** **"
echo " ** ERROR: lcov not found, it must be installed to run code **"
echo " ** coverage locally **"
echo " ** **"
echo " *****************************************************************"
echo ""
quit 1
(output, exitCode) = gorgeEx("gcov --version")
if output.contains("Apple LLVM"):
echo ""
echo " ************************* ⚠️ WARNING ⚠️ *************************"
echo " ** **"
echo " ** WARNING: Using Apple's llvm-cov in place of gcov, which **"
echo " ** emulates an old version of gcov (4.2.0) and therefore **"
echo " ** coverage results will differ than those on CI (which **"
echo " ** uses a much newer version of gcov). **"
echo " ** **"
echo " *****************************************************************"
echo ""
var nimSrcs = ""
for f in walkDirRec(".", {pcFile}):
if f.endswith(".nim"): nimSrcs.add " " & f.absolutePath.quoteShell()
echo "======== Running Tests ======== "
exec("nim c -r coverage.nim")
exec("rm nimcache/*.c")
rmDir("coverage"); mkDir("coverage")
echo " ======== Running LCOV ======== "
exec("lcov --capture --directory nimcache --output-file coverage/coverage.info")
exec("lcov --extract coverage/coverage.info --output-file coverage/coverage.f.info " & nimSrcs)
echo " ======== Generating HTML coverage report ======== "
exec("genhtml coverage/coverage.f.info --output-directory coverage/report ")
echo " ======== Opening HTML coverage report in browser... ======== "
if findExe("open") != "":
exec("open coverage/report/index.html")

View File

@ -1,17 +1,20 @@
import
std/net,
bearssl/rand,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
codexdht/discv5/[node, routing_table, spr],
codexdht/discv5/protocol as discv5_protocol
libp2pdht/discv5/[node, routing_table, spr],
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
stew/shims/net
export net
proc localAddress*(port: int): Address =
Address(ip: IPv4_loopback(), port: Port(port))
Address(ip: ValidIpAddress.init("127.0.0.1"), port: Port(port))
proc example*(T: type PrivateKey, rng: ref HmacDrbgContext): PrivateKey =
PrivateKey.random(PKScheme.Secp256k1, rng[]).expect("Valid rng for private key")
PrivateKey.random(rng[]).expect("Valid rng for private key")
proc example*(T: type NodeId, rng: ref HmacDrbgContext): NodeId =
let
@ -50,8 +53,8 @@ proc nodeIdInNodes*(id: NodeId, nodes: openArray[Node]): bool =
for n in nodes:
if id == n.id: return true
proc generateNode*(privKey: PrivateKey, port: int,
ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
proc generateNode*(privKey: PrivateKey, port: int = 20302,
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
let
port = Port(port)
@ -64,51 +67,51 @@ proc generateNRandomNodes*(rng: ref HmacDrbgContext, n: int): seq[Node] =
for i in 1..n:
let
privKey = PrivateKey.example(rng)
node = privKey.generateNode(port = 20402 + 10*n)
node = privKey.generateNode()
res.add(node)
res
proc nodeAndPrivKeyAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: IpAddress = parseIpAddress("127.0.0.1")): (Node, PrivateKey) =
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): (Node, PrivateKey) =
while true:
let
privKey = PrivateKey.random(rng).expect("Valid rng for private key")
node = privKey.generateNode(port = 21302 + 10*d.int, ip = ip)
node = privKey.generateNode(ip = ip)
if logDistance(n.id, node.id) == d:
return (node, privKey)
proc nodeAtDistance*(n: Node, rng: var HmacDrbgContext, d: uint32,
ip: IpAddress = parseIpAddress("127.0.0.1")): Node =
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): Node =
let (node, _) = n.nodeAndPrivKeyAtDistance(rng, d, ip)
node
proc nodesAtDistance*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
for i in 0..<amount:
result.add(nodeAtDistance(n, rng, d, ip))
proc nodesAtDistanceUniqueIp*(
n: Node, rng: var HmacDrbgContext, d: uint32, amount: int,
ip: IpAddress = parseIpAddress("127.0.0.1")): seq[Node] =
ip: ValidIpAddress = ValidIpAddress.init("127.0.0.1")): seq[Node] =
var ta = initTAddress(ip, Port(0))
for i in 0..<amount:
ta.inc()
result.add(nodeAtDistance(n, rng, d, ta.address()))
result.add(nodeAtDistance(n, rng, d, ValidIpAddress.init(ta.address())))
proc addSeenNode*(d: discv5_protocol.Protocol, n: Node): bool =
# Add it as a seen node, warning: for testing convenience only!
n.registerSeen()
n.seen = true
d.addNode(n)
func udpExample*(_: type MultiAddress): MultiAddress =
## creates a new udp MultiAddress on a random port
MultiAddress.init("/ip4/0.0.0.0/udp/0")
## creates a new udp multiaddress on a random port
Multiaddress.init("/ip4/0.0.0.0/udp/0")
func udpExamples*(_: type MultiAddress, count: int): seq[MultiAddress] =
var res: seq[MultiAddress] = @[]
for i in 1..count:
res.add MultiAddress.init("/ip4/0.0.0.0/udp/" & $i).get
res.add Multiaddress.init("/ip4/0.0.0.0/udp/" & $i).get
return res
proc toSignedPeerRecord*(privKey: PrivateKey) : SignedPeerRecord =

View File

@ -2,14 +2,15 @@
import std/sequtils
import pkg/chronos
import pkg/asynctest/chronos/unittest
import pkg/asynctest
import pkg/datastore
from pkg/libp2p import PeerId
import pkg/libp2p
import codexdht/private/eth/p2p/discoveryv5/spr
import codexdht/private/eth/p2p/discoveryv5/providers
import codexdht/discv5/node
import codexdht/private/eth/p2p/discoveryv5/lru
import libp2pdht/dht
import libp2pdht/private/eth/p2p/discoveryv5/spr
import libp2pdht/private/eth/p2p/discoveryv5/providers
import libp2pdht/discv5/node
import libp2pdht/private/eth/p2p/discoveryv5/lru
import ./test_helper
suite "Test Providers Manager simple":
@ -100,10 +101,10 @@ suite "Test Providers Manager multiple":
not (await manager.contains(nodeIds[49]))
not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId with associated keys":
(await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId, true))).tryGet
test "Should remove by PeerId":
(await (manager.remove(providers[0].data.peerId))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet
for id in nodeIds:
check:
@ -116,22 +117,6 @@ suite "Test Providers Manager multiple":
not (await manager.contains(providers[5].data.peerId))
not (await manager.contains(providers[9].data.peerId))
test "Should not return keys without provider":
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 10
for provider in providers:
(await (manager.remove(provider.data.peerId))).tryGet
for id in nodeIds:
check:
(await manager.get(id)).tryGet.len == 0
for provider in providers:
check:
not (await manager.contains(provider.data.peerId))
suite "Test providers with cache":
let
rng = newRng()
@ -180,9 +165,9 @@ suite "Test providers with cache":
not (await manager.contains(nodeIds[99]))
test "Should remove by PeerId":
(await (manager.remove(providers[0].data.peerId, true))).tryGet
(await (manager.remove(providers[5].data.peerId, true))).tryGet
(await (manager.remove(providers[9].data.peerId, true))).tryGet
(await (manager.remove(providers[0].data.peerId))).tryGet
(await (manager.remove(providers[5].data.peerId))).tryGet
(await (manager.remove(providers[9].data.peerId))).tryGet
for id in nodeIds:
check:
@ -234,24 +219,6 @@ suite "Test Provider Maintenance":
for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0
test "Should not cleanup unexpired":
let
unexpired = PrivateKey.example(rng).toSignedPeerRecord()
(await manager.add(nodeIds[0], unexpired, ttl = 1.minutes)).tryGet
await sleepAsync(500.millis)
await manager.store.cleanupExpired()
let
unexpiredProvs = (await manager.get(nodeIds[0])).tryGet
check:
unexpiredProvs.len == 1
await (unexpired.data.peerId in manager)
(await manager.remove(nodeIds[0])).tryGet
test "Should cleanup orphaned":
for id in nodeIds:
check: (await manager.get(id)).tryGet.len == 0

View File

@ -10,15 +10,18 @@
{.used.}
import
std/[options],
asynctest/chronos/unittest2,
std/[options, sequtils],
asynctest,
bearssl/rand,
chronicles,
chronos,
nimcrypto,
libp2p/crypto/[crypto, secp],
libp2p/[multiaddress, multicodec, multihash, routing_record, signed_envelope],
codexdht/discv5/crypto as dhtcrypto,
codexdht/discv5/protocol as discv5_protocol,
libp2pdht/dht,
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
stew/byteutils,
test_helper
proc bootstrapNodes(
@ -31,7 +34,7 @@ proc bootstrapNodes(
debug "---- STARTING BOOSTRAPS ---"
for i in 0..<nodecount:
let privKey = PrivateKey.example(rng)
let node = initDiscoveryNode(rng, privKey, localAddress(23302 + i), bootnodes)
let node = initDiscoveryNode(rng, privKey, localAddress(20302 + i), bootnodes)
await node.start()
result.add((node, privKey))
if delay > 0:
@ -50,13 +53,13 @@ proc bootstrapNetwork(
bootNodeKey = PrivateKey.fromHex(
"a2b50376a79b1a8c8a3296485572bdfbf54708bb46d3c25d73d2723aaaf6a617")
.expect("Valid private key hex")
bootNodeAddr = localAddress(25311)
bootNodeAddr = localAddress(20301)
bootNode = initDiscoveryNode(rng, bootNodeKey, bootNodeAddr, @[]) # just a shortcut for new and open
#waitFor bootNode.bootstrap() # immediate, since no bootnodes are defined above
var res = await bootstrapNodes(nodecount - 1,
@[bootNode.localNode.record],
@[bootnode.localNode.record],
rng,
delay)
res.insert((bootNode, bootNodeKey), 0)
@ -122,6 +125,7 @@ suite "Providers Tests: node alone":
debug "Providers:", providers
check (providers.len == 0)
suite "Providers Tests: two nodes":
var

View File

@ -2,12 +2,12 @@
import
std/tables,
chronos, chronicles, stint, asynctest/chronos/unittest,
chronos, chronicles, stint, asynctest, stew/shims/net,
stew/byteutils, bearssl/rand,
libp2p/crypto/crypto,
codexdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
codexdht/discv5/crypto as dhtcrypto,
codexdht/discv5/protocol as discv5_protocol,
libp2pdht/discv5/[transport, spr, node, routing_table, encoding, sessions, nodes_verification],
libp2pdht/discv5/crypto as dhtcrypto,
libp2pdht/discv5/protocol as discv5_protocol,
../dht/test_helper
suite "Discovery v5 Tests":
@ -22,13 +22,13 @@ suite "Discovery v5 Tests":
pk = PrivateKey.example(rng)
targetPk = PrivateKey.example(rng)
node = initDiscoveryNode(rng, pk, localAddress(20302))
targetNode = targetPk.generateNode(port=26302)
targetNode = targetPk.generateNode()
check node.addNode(targetNode)
for i in 0..<1000:
let pk = PrivateKey.example(rng)
discard node.addNode(pk.generateNode(port=27302+i))
discard node.addNode(pk.generateNode())
let n = node.getNode(targetNode.id)
check n.isSome()
@ -265,7 +265,7 @@ suite "Discovery v5 Tests":
# Generate 1000 random nodes and add to our main node's routing table
for i in 0..<1000:
discard mainNode.addSeenNode(generateNode(PrivateKey.example(rng), port=28302+i)) # for testing only!
discard mainNode.addSeenNode(generateNode(PrivateKey.example(rng))) # for testing only!
let
neighbours = mainNode.neighbours(mainNode.localNode.id)
@ -287,7 +287,7 @@ suite "Discovery v5 Tests":
await mainNode.closeWait()
await testNode.closeWait()
proc testLookupTargets(fast: bool = false): Future[bool] {.async.} =
proc testLookupTargets(fast: bool = false) {.async.} =
const
nodeCount = 17
@ -306,9 +306,9 @@ suite "Discovery v5 Tests":
for t in nodes:
if n != t:
let pong = await n.ping(t.localNode)
check pong.isOk()
if pong.isErr():
echo pong.error
return false
# check (await n.ping(t.localNode)).isOk()
for i in 1 ..< nodeCount:
@ -318,19 +318,16 @@ suite "Discovery v5 Tests":
let target = nodes[i]
let discovered = await nodes[nodeCount-1].lookup(target.localNode.id, fast = fast)
debug "Lookup result", target = target.localNode, discovered
if discovered[0] != target.localNode:
return false
check discovered[0] == target.localNode
for node in nodes:
await node.closeWait()
return true
test "Lookup targets":
check await testLookupTargets()
await testLookupTargets()
test "Lookup targets using traditional findNode":
check await testLookupTargets(fast = true)
await testLookupTargets(fast = true)
test "Resolve target":
let
@ -415,37 +412,31 @@ suite "Discovery v5 Tests":
await mainNode.closeWait()
await lookupNode.closeWait()
test "Random nodes, also with filter":
let
lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
targetNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20302))
otherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20303))
anotherNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20304))
# We no longer support field filtering
# test "Random nodes with spr field filter":
# let
# lookupNode = initDiscoveryNode(rng, PrivateKey.example(rng), localAddress(20301))
# targetNode = generateNode(PrivateKey.example(rng))
# otherNode = generateNode(PrivateKey.example(rng))
# anotherNode = generateNode(PrivateKey.example(rng))
check:
lookupNode.addNode(targetNode.localNode.record)
lookupNode.addNode(otherNode.localNode.record)
lookupNode.addNode(anotherNode.localNode.record)
# check:
# lookupNode.addNode(targetNode)
# lookupNode.addNode(otherNode)
# lookupNode.addNode(anotherNode)
let discovered = lookupNode.randomNodes(10)
check discovered.len == 3
let discoveredFiltered = lookupNode.randomNodes(10,
proc(n: Node) : bool = n.address.get.port == Port(20302))
check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode.localNode)
let discoveredEmpty = lookupNode.randomNodes(10,
proc(n: Node) : bool = n.address.get.port == Port(20305))
check discoveredEmpty.len == 0
await lookupNode.closeWait()
await targetNode.closeWait()
await otherNode.closeWait()
await anotherNode.closeWait()
# let discovered = lookupNode.randomNodes(10)
# check discovered.len == 3
# let discoveredFiltered = lookupNode.randomNodes(10,
# ("test", @[byte 1,2,3,4]))
# check discoveredFiltered.len == 1 and discoveredFiltered.contains(targetNode)
# await lookupNode.closeWait()
test "New protocol with spr":
let
privKey = PrivateKey.example(rng)
ip = some(parseIpAddress("127.0.0.1"))
ip = some(ValidIpAddress.init("127.0.0.1"))
port = Port(20301)
node = newProtocol(privKey, ip, some(port), some(port), bindPort = port,
rng = rng)
@ -540,7 +531,7 @@ suite "Discovery v5 Tests":
let
port = Port(9000)
fromNoderecord = SignedPeerRecord.init(1, PrivateKey.example(rng),
some(parseIpAddress("11.12.13.14")),
some(ValidIpAddress.init("11.12.13.14")),
some(port), some(port))[]
fromNode = newNode(fromNoderecord)[]
privKey = PrivateKey.example(rng)
@ -552,7 +543,7 @@ suite "Discovery v5 Tests":
block: # Duplicates
let
record = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("12.13.14.15")),
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
some(port), some(port))[]
# Exact duplicates
@ -562,7 +553,7 @@ suite "Discovery v5 Tests":
# Node id duplicates
let recordSameId = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("212.13.14.15")),
1, privKey, some(ValidIpAddress.init("212.13.14.15")),
some(port), some(port))[]
records.add(recordSameId)
nodes = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -571,7 +562,7 @@ suite "Discovery v5 Tests":
block: # No address
let
recordNoAddress = SignedPeerRecord.init(
1, privKey, none(IpAddress), some(port), some(port))[]
1, privKey, none(ValidIpAddress), some(port), some(port))[]
records = [recordNoAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
check test.len == 0
@ -579,7 +570,7 @@ suite "Discovery v5 Tests":
block: # Invalid address - site local
let
recordInvalidAddress = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("10.1.2.3")),
1, privKey, some(ValidIpAddress.init("10.1.2.3")),
some(port), some(port))[]
records = [recordInvalidAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -588,7 +579,7 @@ suite "Discovery v5 Tests":
block: # Invalid address - loopback
let
recordInvalidAddress = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("127.0.0.1")),
1, privKey, some(ValidIpAddress.init("127.0.0.1")),
some(port), some(port))[]
records = [recordInvalidAddress]
test = verifyNodesRecords(records, fromNode, limit, targetDistance)
@ -597,7 +588,7 @@ suite "Discovery v5 Tests":
block: # Invalid distance
let
recordInvalidDistance = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("12.13.14.15")),
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
some(port), some(port))[]
records = [recordInvalidDistance]
test = verifyNodesRecords(records, fromNode, limit, @[0'u16])
@ -606,7 +597,7 @@ suite "Discovery v5 Tests":
block: # Invalid distance but distance validation is disabled
let
recordInvalidDistance = SignedPeerRecord.init(
1, privKey, some(parseIpAddress("12.13.14.15")),
1, privKey, some(ValidIpAddress.init("12.13.14.15")),
some(port), some(port))[]
records = [recordInvalidDistance]
test = verifyNodesRecords(records, fromNode, limit)
@ -633,12 +624,12 @@ suite "Discovery v5 Tests":
let
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
let (packet, _, _) = encodeMessagePacket(rng[], codec,
let (packet, _) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
@ -662,13 +653,13 @@ suite "Discovery v5 Tests":
let
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
for i in 0 ..< 5:
let a = localAddress(20303 + i)
let (packet, _, _) = encodeMessagePacket(rng[], codec,
let (packet, _) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
@ -693,14 +684,14 @@ suite "Discovery v5 Tests":
a = localAddress(20303)
privKey = PrivateKey.example(rng)
enrRec = SignedPeerRecord.init(1, privKey,
some(parseIpAddress("127.0.0.1")), some(Port(9000)),
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly intialized private key")
sendNode = newNode(enrRec).expect("Properly initialized record")
var codec = Codec(localNode: sendNode, privKey: privKey, sessions: Sessions.init(5))
var firstRequestNonce: AESGCMNonce
for i in 0 ..< 5:
let (packet, requestNonce, _) = encodeMessagePacket(rng[], codec,
let (packet, requestNonce) = encodeMessagePacket(rng[], codec,
receiveNode.localNode.id, receiveNode.localNode.address.get(), @[])
receiveNode.transport.receive(a, packet)
if i == 0:

View File

@ -2,13 +2,14 @@
import
std/[options, sequtils, tables],
asynctest/chronos/unittest2,
asynctest/unittest2,
bearssl/rand,
chronos,
libp2p/crypto/secp,
codexdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
codexdht/discv5/crypto,
libp2pdht/discv5/[messages, messages_encoding, encoding, spr, node, sessions],
libp2pdht/discv5/crypto,
stew/byteutils,
stew/shims/net,
stint,
../dht/test_helper
@ -274,12 +275,12 @@ suite "Discovery v5.1 Packet Encodings Test Vectors":
let
enrRecA = SignedPeerRecord.init(1, privKeyA,
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly initialized private key")
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
enrRecB = SignedPeerRecord.init(1, privKeyB,
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly initialized private key")
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
nodeA = newNode(enrRecA).expect("Properly initialized record")
nodeB = newNode(enrRecB).expect("Properly initialized record")
@ -507,12 +508,12 @@ suite "Discovery v5.1 Additional Encode/Decode":
let
enrRecA = SignedPeerRecord.init(1, privKeyA,
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly initialized private key")
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
enrRecB = SignedPeerRecord.init(1, privKeyB,
some(parseIpAddress("127.0.0.1")), some(Port(9001)),
some(Port(9001))).expect("Properly initialized private key")
some(ValidIpAddress.init("127.0.0.1")), some(Port(9000)),
some(Port(9000))).expect("Properly initialized private key")
nodeA = newNode(enrRecA).expect("Properly initialized record")
nodeB = newNode(enrRecB).expect("Properly initialized record")
@ -525,7 +526,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
reqId = RequestId.init(rng[])
message = encodeMessage(m, reqId)
let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
nodeB.address.get(), message)
let decoded = codecB.decodePacket(nodeA.address.get(), data)
@ -641,7 +642,7 @@ suite "Discovery v5.1 Additional Encode/Decode":
codecB.sessions.store(nodeA.id, nodeA.address.get(), secrets.initiatorKey,
secrets.recipientKey)
let (data, nonce, _) = encodeMessagePacket(rng[], codecA, nodeB.id,
let (data, nonce) = encodeMessagePacket(rng[], codecA, nodeB.id,
nodeB.address.get(), message)
let decoded = codecB.decodePacket(nodeA.address.get(), data)

View File

@ -1,6 +0,0 @@
import ./dht/test_providers
import ./dht/test_providermngr
import ./discv5/test_discoveryv5
import ./discv5/test_discoveryv5_encoding
{.warning[UnusedImport]: off.}

View File

@ -1,13 +0,0 @@
# Package
version = "0.4.0"
author = "Status Research & Development GmbH"
description = "Tests for Logos Storage DHT"
license = "MIT"
installFiles = @["build.nims"]
# Dependencies
requires "asynctest >= 0.5.2 & < 0.6.0"
requires "unittest2 <= 0.0.9"
include "build.nims"

5
tests/testAll.nim Normal file
View File

@ -0,0 +1,5 @@
import
./dht/[test_providers, test_providermngr],
./discv5/[test_discoveryv5, test_discoveryv5_encoding]
{.warning[UnusedImport]: off.}

View File

@ -1,22 +0,0 @@
# import
# ./dht/[test_providers, test_providermngr],
# ./discv5/[test_discoveryv5, test_discoveryv5_encoding]
import osproc
var cmds: seq[string]
when defined(testsPart1) or defined(testsAll):
cmds.add [
"nim c -r --hints:off --verbosity:0 dht/test_providers.nim",
"nim c -r --hints:off --verbosity:0 dht/test_providermngr.nim",
]
when defined(testsPart2) or defined(testsAll):
cmds.add [
"nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5.nim",
"nim c -r --hints:off --verbosity:0 discv5/test_discoveryv5_encoding.nim",
]
echo "Running Test Commands: ", cmds
quit execProcesses(cmds)

2
vendor/atlas.workspace vendored Normal file
View File

@ -0,0 +1,2 @@
deps=""
resolver="MaxVer"