Merge pull request #2 from status-im/setup-repo

Setup repo + move code from nimbus / nim-eth
This commit is contained in:
Oskar Thorén 2020-05-06 15:06:39 +08:00 committed by GitHub
commit 46fb777ccb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
67 changed files with 7908 additions and 10828 deletions

36
.appveyor.yml Normal file
View File

@ -0,0 +1,36 @@
version: '{build}'
image: Visual Studio 2015
init: # Scripts called at the very beginning
# Enable paths > 260 characters
- ps: Set-ItemProperty -Path 'HKLM:\SYSTEM\CurrentControlSet\Control\FileSystem' -Name 'LongPathsEnabled' -Value 1
- git config --global core.longpaths true
cache:
- NimBinaries
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x64
- x86
install:
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
build_script:
# the 32-bit build is done on a 64-bit image, so we need to override the architecture
- mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% CI_CACHE=NimBinaries update
- mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% fetch-dlls
test_script:
- mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% LOG_LEVEL=TRACE
- build\wakunode.exe --help
- mingw32-make -j2 ARCH_OVERRIDE=%PLATFORM% test
deploy: off

22
.gitignore vendored
View File

@ -1 +1,21 @@
build
/nimcache
# Executables shall be put in an ignored build/ directory
/build
# Nimble packages
/vendor/.nimble
# ntags/ctags output
/tags
# a symlink that can't be added to the repo because of Windows
/waku.nims
# Ignore dynamic, static libs and libtool archive files
*.so
*.dylib
*.a
*.la
*.exe
*.dll

99
.gitmodules vendored
View File

@ -1,15 +1,100 @@
[submodule "vendor/nimbus"]
path = vendor/nimbus
url = git@github.com:status-im/nimbus.git
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = git@github.com:status-im/nim-eth.git
url = https://github.com/status-im/nim-eth.git
ignore = dirty
branch = master
[submodule "vendor/nim-secp256k1"]
path = vendor/nim-secp256k1
url = git@github.com:status-im/nim-secp256k1.git
url = https://github.com/status-im/nim-secp256k1.git
ignore = dirty
branch = master
[submodule "vendor/nim-libp2p"]
path = vendor/nim-libp2p
url = git@github.com:status-im/nim-libp2p.git
url = https://github.com/status-im/nim-libp2p.git
ignore = dirty
branch = master
[submodule "vendor/nim-stew"]
path = vendor/nim-stew
url = git@github.com:status-im/nim-stew.git
url = https://github.com/status-im/nim-stew.git
ignore = dirty
branch = master
[submodule "vendor/nimbus-build-system"]
path = vendor/nimbus-build-system
url = https://github.com/status-im/nimbus-build-system.git
ignore = dirty
branch = master
[submodule "vendor/nim-nat-traversal"]
path = vendor/nim-nat-traversal
url = https://github.com/status-im/nim-nat-traversal.git
ignore = dirty
branch = master
[submodule "vendor/nim-libbacktrace"]
path = vendor/nim-libbacktrace
url = https://github.com/status-im/nim-libbacktrace.git
ignore = dirty
branch = master
[submodule "vendor/nim-confutils"]
path = vendor/nim-confutils
url = https://github.com/status-im/nim-confutils.git
ignore = dirty
branch = master
[submodule "vendor/nim-chronicles"]
path = vendor/nim-chronicles
url = https://github.com/status-im/nim-chronicles.git
ignore = dirty
branch = master
[submodule "vendor/nim-faststreams"]
path = vendor/nim-faststreams
url = https://github.com/status-im/nim-faststreams.git
ignore = dirty
branch = master
[submodule "vendor/nim-chronos"]
path = vendor/nim-chronos
url = https://github.com/status-im/nim-chronos.git
ignore = dirty
branch = master
[submodule "vendor/nim-json-serialization"]
path = vendor/nim-json-serialization
url = https://github.com/status-im/nim-json-serialization.git
ignore = dirty
branch = master
[submodule "vendor/nim-serialization"]
path = vendor/nim-serialization
url = https://github.com/status-im/nim-serialization.git
ignore = dirty
branch = master
[submodule "vendor/nimcrypto"]
path = vendor/nimcrypto
url = https://github.com/cheatfate/nimcrypto.git
ignore = dirty
branch = master
[submodule "vendor/nim-metrics"]
path = vendor/nim-metrics
url = https://github.com/status-im/nim-metrics.git
ignore = dirty
branch = master
[submodule "vendor/nim-stint"]
path = vendor/nim-stint
url = https://github.com/status-im/nim-stint.git
ignore = dirty
branch = master
[submodule "vendor/nim-json-rpc"]
path = vendor/nim-json-rpc
url = https://github.com/status-im/nim-json-rpc.git
ignore = dirty
branch = master
[submodule "vendor/nim-http-utils"]
path = vendor/nim-http-utils
url = https://github.com/status-im/nim-http-utils.git
ignore = dirty
branch = master
[submodule "vendor/news"]
path = vendor/news
url = https://github.com/tormund/news.git
ignore = dirty
branch = master
[submodule "vendor/nim-bearssl"]
path = vendor/nim-bearssl
url = https://github.com/status-im/nim-bearssl.git
ignore = dirty
branch = master

48
.travis.yml Normal file
View File

@ -0,0 +1,48 @@
language: c
dist: bionic
# https://docs.travis-ci.com/user/caching/
cache:
ccache: true
directories:
- vendor/nimbus-build-system/vendor/Nim/bin
git:
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
depth: 10
matrix:
include:
- os: linux
arch: amd64
sudo: required
env:
- NPROC=2
before_install:
- export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib"
- sudo apt-get -q update
- os: linux
arch: arm64
sudo: required
env:
- NPROC=6 # Worth trying more than 2 parallel jobs: https://travis-ci.community/t/no-cache-support-on-arm64/5416/8
# (also used to get a different cache key than the amd64 one)
before_install:
- export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib"
- sudo apt-get -q update
- sudo apt-get install -y libpcre3-dev
- os: osx
before_install:
- HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 brew install ccache
env:
- NPROC=2
script:
- set -e # fail fast
# Building Nim-1.0.4 takes up to 10 minutes on Travis - the time limit after which jobs are cancelled for having no output
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" V=1 update # to allow a newer Nim version to be detected
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" LOG_LEVEL=TRACE
- build/wakunode --help
- make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC}" test

205
LICENSE-APACHEv2 Normal file
View File

@ -0,0 +1,205 @@
waku is licensed under the Apache License version 2
Copyright (c) 2018 Status Research & Development GmbH
-----------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Status Research & Development GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
LICENSE-MIT Normal file
View File

@ -0,0 +1,25 @@
waku is licensed under the MIT License
Copyright (c) 2018 Status Research & Development GmbH
-----------------------------------------------------
The MIT License (MIT)
Copyright (c) 2018 Status Research & Development GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

109
Makefile
View File

@ -1,19 +1,102 @@
all: wakunode start_network quicksim
# Copyright (c) 2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
start_network: node/v0/start_network.nim
nim c --threads:on -o:build/start_network node/v0/start_network.nim
SHELL := bash # the shell used internally by Make
quicksim: node/v0/quicksim.nim
nim c --threads:on -o:build/quicksim node/v0/quicksim.nim
# used inside the included makefiles
BUILD_SYSTEM_DIR := vendor/nimbus-build-system
wakunode: node/v0/wakunode.nim
nim c --threads:on -o:build/wakunode node/v0/wakunode.nim
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
wakunode2: node/v2/wakunode.nim
nim c --threads:on -o:build/wakunode2 node/v2/wakunode.nim
.PHONY: \
all \
deps \
update \
wakunode \
test \
clean \
libbacktrace
quicksim2: node/v2/quicksim.nim
nim c --threads:on -o:build/quicksim2 node/v2/quicksim.nim
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
GIT_SUBMODULE_UPDATE := git submodule update --init --recursive
.DEFAULT:
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
$(GIT_SUBMODULE_UPDATE); \
echo
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
#
# After restarting, it will execute its original goal, so we don't have to start a child Make here
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
protocol2: protocol/v2/waku_protocol.nim
nim c --threads:on -o:build/protocol2 protocol/v2/waku_protocol.nim
else # "variables.mk" was included. Business as usual until the end of this file.
# default target, because it's the first one that doesn't start with '.'
all: | wakunode wakusim
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims
ifeq ($(USE_LIBBACKTRACE), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:debug -d:disable_libbacktrace
else
NIM_PARAMS := $(NIM_PARAMS) -d:release
endif
deps: | deps-common waku.nims
ifneq ($(USE_LIBBACKTRACE), 0)
deps: | libbacktrace
endif
#- deletes and recreates "waku.nims" which on Windows is a copy instead of a proper symlink
update: | update-common
rm -rf waku.nims && \
$(MAKE) waku.nims $(HANDLE_OUTPUT)
# a phony target, because teaching `make` how to do conditional recompilation of Nim projects is too complicated
wakunode: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakunode $(NIM_PARAMS) waku.nims
wakusim: | build deps wakunode
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakusim $(NIM_PARAMS) waku.nims
wakunode2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims
wakusim2: | build deps wakunode2
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakusim2 $(NIM_PARAMS) waku.nims
protocol2:
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim protocol2 $(NIM_PARAMS) waku.nims
# symlink
waku.nims:
ln -s waku.nimble $@
# nim-libbacktrace
libbacktrace:
+ $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
# builds and runs the test suite
test: | build deps
$(ENV_SCRIPT) nim test $(NIM_PARAMS) waku.nims
# usual cleaning
clean: | clean-common
rm -rf build/{wakunode,quicksim,start_network,all_tests}
ifneq ($(USE_LIBBACKTRACE), 0)
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif
endif # "variables.mk" was not included

57
config.nims Normal file
View File

@ -0,0 +1,57 @@
if defined(release):
switch("nimcache", "nimcache/release/$projectName")
else:
switch("nimcache", "nimcache/debug/$projectName")
if defined(windows):
# disable timestamps in Windows PE headers - https://wiki.debian.org/ReproducibleBuilds/TimestampsInPEBinaries
switch("passL", "-Wl,--no-insert-timestamp")
# increase stack size
switch("passL", "-Wl,--stack,8388608")
# https://github.com/nim-lang/Nim/issues/4057
--tlsEmulation:off
if defined(i386):
# set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
switch("passL", "-Wl,--large-address-aware")
# The dynamic Chronicles output currently prevents us from using colors on Windows
# because these require direct manipulations of the stdout File object.
switch("define", "chronicles_colors=off")
# This helps especially for 32-bit x86, which sans SSE2 and newer instructions
# requires quite roundabout code generation for cryptography, and other 64-bit
# and larger arithmetic use cases, along with register starvation issues. When
# engineering a more portable binary release, this should be tweaked but still
# use at least -msse2 or -msse3.
if defined(disableMarchNative):
switch("passC", "-msse3")
else:
switch("passC", "-march=native")
if defined(windows):
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782
# ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes)
switch("passC", "-mno-avx512vl")
--threads:on
--opt:speed
--excessiveStackTrace:on
# enable metric collection
--define:metrics
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
# the default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx):
# add debugging symbols and original files and line numbers
--debugger:native
if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace):
# light-weight stack traces using libbacktrace and libunwind
--define:nimStackTraceOverride
switch("import", "libbacktrace")
--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
switch("warning", "CaseTransition:off")

8
env.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh

View File

@ -1,11 +0,0 @@
# Package
version = "0.1.0"
author = "Status Research & Development GmbH"
description = "Waku, Private P2P Messaging for Resource-Restricted Devices"
license = "MIT"
srcDir = "src"
#bin = @["build/waku"]
# Dependencies
requires "nim >= 1.2.0"

View File

@ -1,5 +0,0 @@
# Waku Node
TODO.
See README in `v0` folder for instructions on how to run a node.

View File

@ -1,48 +0,0 @@
global:
scrape_interval: 1s
scrape_configs:
- job_name: "wakusim"
static_configs:
- targets: ['127.0.0.1:8010']
labels:
node: '0'
- targets: ['127.0.0.1:8011']
labels:
node: '1'
- targets: ['127.0.0.1:8012']
labels:
node: '2'
- targets: ['127.0.0.1:8013']
labels:
node: '3'
- targets: ['127.0.0.1:8014']
labels:
node: '4'
- targets: ['127.0.0.1:8015']
labels:
node: '5'
- targets: ['127.0.0.1:8016']
labels:
node: '6'
- targets: ['127.0.0.1:8017']
labels:
node: '7'
- targets: ['127.0.0.1:8018']
labels:
node: '8'
- targets: ['127.0.0.1:8019']
labels:
node: '9'
- targets: ['127.0.0.1:8020']
labels:
node: '10'
- targets: ['127.0.0.1:8021']
labels:
node: '11'
- targets: ['127.0.0.1:8008']
labels:
node: '12'
- targets: ['127.0.0.1:8009']
labels:
node: '13'

File diff suppressed because it is too large Load Diff

7
tests/all_tests.nim Normal file
View File

@ -0,0 +1,7 @@
import
./test_waku_connect,
./test_waku_config,
./test_waku_bridge,
./test_waku_mail,
./test_rpc_waku

32
tests/test_helpers.nim Normal file
View File

@ -0,0 +1,32 @@
import
unittest, chronos, strutils,
eth/[keys, p2p]
# Module-global port counter shared by `setupTestNode`: every node created in a
# test run gets a fresh port, starting from the conventional devp2p default
# 30303, so multiple nodes in one process don't collide on bind.
var nextPort = 30303
proc localAddress*(port: int): Address =
  ## Build a loopback `Address` that uses the same `port` for both TCP and UDP.
  let p = Port(port)
  Address(ip: parseIpAddress("127.0.0.1"), tcpPort: p, udpPort: p)
proc setupTestNode*(capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
  ## Create an `EthereumNode` bound to 127.0.0.1 on a fresh port (bumping the
  ## module-global port counter) with only the explicitly requested
  ## `capabilities` enabled, none added by default.
  let nodeKeys = KeyPair.random()[]
  result = newEthereumNode(nodeKeys, localAddress(nextPort), 1, nil,
    addAllCapabilities = false)
  inc nextPort
  for cap in capabilities:
    result.addCapability cap
template asyncTest*(name, body: untyped) =
  ## Register a unittest `test` whose `body` executes inside an async proc,
  ## driven to completion with `waitFor` so the test blocks until it finishes.
  test name:
    proc testBody {.async.} = body
    waitFor testBody()
template procSuite*(name, body: untyped) =
  ## Wrap a unittest `suite` in a proc and call it immediately, giving the
  ## suite body its own scope (locals declared in it don't leak to module level).
  proc runSuite =
    suite name:
      body
  runSuite()
template sourceDir*: string =
  ## Directory of the source file instantiating this template (expands at the
  ## call site, so each module sees its own containing directory).
  currentSourcePath().rsplit(DirSep, 1)[0]

237
tests/test_rpc_waku.nim Normal file
View File

@ -0,0 +1,237 @@
import
unittest, strformat, options, stew/byteutils, json_rpc/[rpcserver, rpcclient],
eth/common as eth_common, eth/[rlp, keys, p2p],
../waku/protocol/v1/waku_protocol,
../waku/node/v1/rpc/[hexstrings, rpc_types, waku, key_storage],
./test_helpers
from os import DirSep, ParDir
## Generate client convenience marshalling wrappers from forward declarations
## For testing, ethcallsigs needs to be kept in sync with ../waku/node/v1/rpc/waku
const sigPath = &"{sourceDir}{DirSep}{ParDir}{DirSep}waku{DirSep}node{DirSep}v1{DirSep}rpc{DirSep}wakucallsigs.nim"
createRpcSigs(RpcSocketClient, sigPath)
proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
let
keypair = KeyPair.random()[]
srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303),
udpPort: Port(30303))
result = newEthereumNode(keypair, srvAddress, 1, nil, "waku test rpc",
addAllCapabilities = false)
for capability in capabilities:
result.addCapability capability
proc doTests {.async.} =
var ethNode = setupNode(Waku)
# Create Ethereum RPCs
let rpcPort = 8545
var
rpcServer = newRpcSocketServer(["localhost:" & $rpcPort])
client = newRpcSocketClient()
let keys = newKeyStorage()
setupWakuRPC(ethNode, keys, rpcServer)
# Begin tests
rpcServer.start()
await client.connect("localhost", Port(rpcPort))
suite "Waku Remote Procedure Calls":
test "waku_version":
check await(client.waku_version()) == wakuVersionStr
test "waku_info":
let info = await client.waku_info()
check info.maxMessageSize == defaultMaxMsgSize
test "waku_setMaxMessageSize":
let testValue = 1024'u64
check await(client.waku_setMaxMessageSize(testValue)) == true
var info = await client.waku_info()
check info.maxMessageSize == testValue
expect ValueError:
discard await(client.waku_setMaxMessageSize(defaultMaxMsgSize + 1))
info = await client.waku_info()
check info.maxMessageSize == testValue
test "waku_setMinPoW":
let testValue = 0.0001
check await(client.waku_setMinPoW(testValue)) == true
let info = await client.waku_info()
check info.minPow == testValue
# test "waku_markTrustedPeer":
# TODO: need to connect a peer to test
test "waku asymKey tests":
let keyID = await client.waku_newKeyPair()
check:
await(client.waku_hasKeyPair(keyID)) == true
await(client.waku_deleteKeyPair(keyID)) == true
await(client.waku_hasKeyPair(keyID)) == false
expect ValueError:
discard await(client.waku_deleteKeyPair(keyID))
let privkey = "0x5dc5381cae54ba3174dc0d46040fe11614d0cc94d41185922585198b4fcef9d3"
let pubkey = "0x04e5fd642a0f630bbb1e4cd7df629d7b8b019457a9a74f983c0484a045cebb176def86a54185b50bbba6bbf97779173695e92835d63109c23471e6da382f922fdb"
let keyID2 = await client.waku_addPrivateKey(privkey)
check:
await(client.waku_getPublicKey(keyID2)) == pubkey.toPublicKey
await(client.waku_getPrivateKey(keyID2)).toRaw() == privkey.toPrivateKey.toRaw()
await(client.waku_hasKeyPair(keyID2)) == true
await(client.waku_deleteKeyPair(keyID2)) == true
await(client.waku_hasKeyPair(keyID2)) == false
expect ValueError:
discard await(client.waku_deleteKeyPair(keyID2))
test "waku symKey tests":
let keyID = await client.waku_newSymKey()
check:
await(client.waku_hasSymKey(keyID)) == true
await(client.waku_deleteSymKey(keyID)) == true
await(client.waku_hasSymKey(keyID)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID))
let symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
let keyID2 = await client.waku_addSymKey(symKey)
check:
await(client.waku_getSymKey(keyID2)) == symKey.toSymKey
await(client.waku_hasSymKey(keyID2)) == true
await(client.waku_deleteSymKey(keyID2)) == true
await(client.waku_hasSymKey(keyID2)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID2))
let keyID3 = await client.waku_generateSymKeyFromPassword("password")
let keyID4 = await client.waku_generateSymKeyFromPassword("password")
let keyID5 = await client.waku_generateSymKeyFromPassword("nimbus!")
check:
await(client.waku_getSymKey(keyID3)) ==
await(client.waku_getSymKey(keyID4))
await(client.waku_getSymKey(keyID3)) !=
await(client.waku_getSymKey(keyID5))
await(client.waku_hasSymKey(keyID3)) == true
await(client.waku_deleteSymKey(keyID3)) == true
await(client.waku_hasSymKey(keyID3)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID3))
# Some defaults for the filter & post tests
let
ttl = 30'u64
topicStr = "0x12345678"
payload = "0x45879632"
# A very low target and long time so we are sure the test never fails
# because of this
powTarget = 0.001
powTime = 1.0
test "waku filter create and delete":
let
topic = topicStr.toTopic()
symKeyID = await client.waku_newSymKey()
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(@[topic]))
filterID = await client.waku_newMessageFilter(options)
check:
filterID.string.isValidIdentifier
await(client.waku_deleteMessageFilter(filterID)) == true
expect ValueError:
discard await(client.waku_deleteMessageFilter(filterID))
test "waku symKey post and filter loop":
  ## Round trip: install a sym-key filter, post a sym-encrypted message,
  ## then read it back through the filter and verify the envelope fields.
  let loopTopic = topicStr.toTopic()
  let symKeyId = await client.waku_newSymKey()
  let filterOptions = WakuFilterOptions(
    symKeyID: some(symKeyId),
    topics: some(@[loopTopic]))
  let loopFilterId = await client.waku_newMessageFilter(filterOptions)
  let post = WakuPostMessage(
    symKeyID: some(symKeyId),
    ttl: ttl,
    topic: some(loopTopic),
    payload: payload.HexDataStr,
    powTime: powTime,
    powTarget: powTarget)
  check:
    await(client.waku_setMinPoW(powTarget)) == true
    await(client.waku_post(post)) == true
  let received = await client.waku_getFilterMessages(loopFilterId)
  check:
    received.len == 1
    # Not signed and not asym-encrypted, so both identity fields are empty.
    received[0].sig.isNone()
    received[0].recipientPublicKey.isNone()
    received[0].ttl == ttl
    received[0].topic == loopTopic
    received[0].payload == hexToSeqByte(payload)
    received[0].padding.len > 0
    received[0].pow >= powTarget
    await(client.waku_deleteMessageFilter(loopFilterId)) == true
test "waku asymKey post and filter loop":
# Round trip with asymmetric encryption: subscribe with the private key,
# post to the matching public key, read the message back via the filter.
let
topic = topicStr.toTopic()
privateKeyID = await client.waku_newKeyPair()
options = WakuFilterOptions(privateKeyID: some(privateKeyID))
filterID = await client.waku_newMessageFilter(options)
pubKey = await client.waku_getPublicKey(privateKeyID)
message = WakuPostMessage(pubKey: some(pubKey),
ttl: ttl,
topic: some(topic),
payload: payload.HexDataStr,
powTime: powTime,
powTarget: powTarget)
check:
await(client.waku_setMinPoW(powTarget)) == true
await(client.waku_post(message)) == true
let messages = await client.waku_getFilterMessages(filterID)
check:
messages.len == 1
messages[0].sig.isNone()
# Decrypted with our private key, so the recipient public key is echoed.
messages[0].recipientPublicKey.get() == pubKey
messages[0].ttl == ttl
messages[0].topic == topic
messages[0].payload == hexToSeqByte(payload)
messages[0].padding.len > 0
messages[0].pow >= powTarget
await(client.waku_deleteMessageFilter(filterID)) == true
test "waku signature in post and filter loop":
# Post a sym-encrypted AND signed message with a filter that requires the
# signer's public key; the received message must expose that key in `sig`.
let
topic = topicStr.toTopic()
symKeyID = await client.waku_newSymKey()
privateKeyID = await client.waku_newKeyPair()
pubKey = await client.waku_getPublicKey(privateKeyID)
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(@[topic]),
sig: some(pubKey))
filterID = await client.waku_newMessageFilter(options)
message = WakuPostMessage(symKeyID: some(symKeyID),
sig: some(privateKeyID),
ttl: ttl,
topic: some(topic),
payload: payload.HexDataStr,
powTime: powTime,
powTarget: powTarget)
check:
await(client.waku_setMinPoW(powTarget)) == true
await(client.waku_post(message)) == true
let messages = await client.waku_getFilterMessages(filterID)
check:
messages.len == 1
# Signature is recovered to the signer's public key on receipt.
messages[0].sig.get() == pubKey
messages[0].recipientPublicKey.isNone()
messages[0].ttl == ttl
messages[0].topic == topic
messages[0].payload == hexToSeqByte(payload)
messages[0].padding.len > 0
messages[0].pow >= powTarget
await(client.waku_deleteMessageFilter(filterID)) == true
rpcServer.stop()
rpcServer.close()
waitFor doTests()

View File

@ -0,0 +1,95 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import
sequtils, unittest, tables, chronos, eth/p2p, eth/p2p/peer_pool,
eth/p2p/rlpx_protocols/whisper_protocol as whisper,
../waku/protocol/v1/waku_protocol as waku,
../waku/protocol/v1/waku_bridge,
./test_helpers
# Shared test timing: a TTL safely above the test duration, and a wait that
# covers one protocol message interval plus slack.
let safeTTL = 5'u32
let waitInterval = waku.messageInterval + 150.milliseconds
procSuite "Waku - Whisper bridge tests":
  ## Verifies that a node running both the Whisper and the Waku capability,
  ## with a shared message queue, bridges traffic between a Whisper-only and
  ## a Waku-only peer.
  # Waku Whisper node has both capabilities, listens to Whisper and Waku and
  # relays traffic between the two.
  var
    nodeWakuWhisper = setupTestNode(Whisper, Waku) # This will be the bridge
    nodeWhisper = setupTestNode(Whisper)
    nodeWaku = setupTestNode(Waku)
  nodeWakuWhisper.startListening()
  let bridgeNode = newNode(nodeWakuWhisper.toENode())
  # Bridging works by sharing one message queue between both protocols.
  nodeWakuWhisper.shareMessageQueue()
  waitFor nodeWhisper.peerPool.connectToNode(bridgeNode)
  waitFor nodeWaku.peerPool.connectToNode(bridgeNode)

  asyncTest "WakuWhisper and Whisper peers connected":
    check:
      nodeWhisper.peerPool.connectedNodes.len() == 1
      nodeWaku.peerPool.connectedNodes.len() == 1

  # NOTE: fixed test-name typo "communcation" -> "communication"
  asyncTest "Whisper - Waku communication via bridge":
    # topic whisper node subscribes to, waku node posts to
    let topic1 = [byte 0x12, 0, 0, 0]
    # topic waku node subscribes to, whisper node posts to
    let topic2 = [byte 0x34, 0, 0, 0]
    var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
    var futures = [newFuture[int](), newFuture[int]()]

    proc handler1(msg: whisper.ReceivedMessage) =
      check msg.decoded.payload == payloads[0]
      futures[0].complete(1)
    proc handler2(msg: waku.ReceivedMessage) =
      check msg.decoded.payload == payloads[1]
      futures[1].complete(1)

    var filter1 = whisper.subscribeFilter(nodeWhisper,
      whisper.initFilter(topics = @[topic1]), handler1)
    var filter2 = waku.subscribeFilter(nodeWaku,
      waku.initFilter(topics = @[topic2]), handler2)

    check:
      # Message should also end up in the queue of the Whisper node,
      # via the bridge
      waku.postMessage(nodeWaku, ttl = safeTTL + 1, topic = topic1,
                       payload = payloads[0]) == true
      # Message should also end up in the queue of the Waku node,
      # via the bridge
      whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2,
                          payload = payloads[1]) == true
      nodeWhisper.protocolState(Whisper).queue.items.len == 1
      nodeWaku.protocolState(Waku).queue.items.len == 1
      # waitInterval*2 as messages have to pass the bridge also (2 hops)
      await allFutures(futures).withTimeout(waitInterval*2)
      # Relay can receive Whisper & Waku messages
      nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2
      nodeWakuWhisper.protocolState(Waku).queue.items.len == 2
      # Whisper node can receive Waku messages (via bridge)
      nodeWhisper.protocolState(Whisper).queue.items.len == 2
      # Waku node can receive Whisper messages (via bridge)
      nodeWaku.protocolState(Waku).queue.items.len == 2
      whisper.unsubscribeFilter(nodeWhisper, filter1) == true
      waku.unsubscribeFilter(nodeWaku, filter2) == true

    # Clean up the queues for subsequent tests.
    waku.resetMessageQueue(nodeWaku)
    whisper.resetMessageQueue(nodeWhisper)
    # shared queue so Waku and Whisper should be set to 0
    waku.resetMessageQueue(nodeWakuWhisper)

    check:
      nodeWhisper.protocolState(Whisper).queue.items.len == 0
      nodeWaku.protocolState(Waku).queue.items.len == 0
      nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0
      nodeWakuWhisper.protocolState(Waku).queue.items.len == 0

View File

@ -0,0 +1,64 @@
#
# Waku
# (c) Copyright 2020
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import
sequtils, options, unittest, times,
../waku/protocol/v1/waku_protocol
suite "Waku envelope validation":
  test "should validate and allow envelope according to config":
    ## A fresh envelope with a sane TTL passes both the validity check and
    ## the per-config allowance check.
    let
      ttl = 1'u32
      topic = [byte 1, 2, 3, 4]
      config = WakuConfig(powRequirement: 0, bloom: some(topic.topicBloom()),
                          isLightNode: false, maxMsgSize: defaultMaxMsgSize)
      envelope = Envelope(expiry: epochTime().uint32 + ttl, ttl: ttl,
                          topic: topic, data: repeat(byte 9, 256), nonce: 0)
    check envelope.valid()
    let msg = initMessage(envelope)
    check msg.allowed(config)

  test "should invalidate envelope due to ttl 0":
    let
      ttl = 0'u32
      topic = [byte 1, 2, 3, 4]
      envelope = Envelope(expiry: epochTime().uint32 + ttl, ttl: ttl,
                          topic: topic, data: repeat(byte 9, 256), nonce: 0)
    check envelope.valid() == false

  test "should invalidate envelope due to expired":
    # expiry == now means the envelope already expired.
    let
      ttl = 1'u32
      topic = [byte 1, 2, 3, 4]
      envelope = Envelope(expiry: epochTime().uint32, ttl: ttl, topic: topic,
                          data: repeat(byte 9, 256), nonce: 0)
    check envelope.valid() == false

  test "should invalidate envelope due to in the future":
    let
      ttl = 1'u32
      topic = [byte 1, 2, 3, 4]
      # there is currently a 2 second tolerance, hence the + 3
      envelope = Envelope(expiry: epochTime().uint32 + ttl + 3, ttl: ttl,
                          topic: topic, data: repeat(byte 9, 256), nonce: 0)
    check envelope.valid() == false

  test "should not allow envelope due to bloom filter":
    # Config bloom is built from a different topic, so the message is refused.
    let
      topic = [byte 1, 2, 3, 4]
      wrongTopic = [byte 9, 8, 7, 6]
      config = WakuConfig(powRequirement: 0,
                          bloom: some(wrongTopic.topicBloom()),
                          isLightNode: false, maxMsgSize: defaultMaxMsgSize)
      envelope = Envelope(expiry: 100000, ttl: 30, topic: topic,
                          data: repeat(byte 9, 256), nonce: 0)
    check initMessage(envelope).allowed(config) == false

557
tests/test_waku_connect.nim Normal file
View File

@ -0,0 +1,557 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import
sequtils, tables, unittest, chronos, eth/[keys, p2p], eth/p2p/peer_pool,
../waku/protocol/v1/waku_protocol,
./test_helpers
# Shared test timing constants.
const
safeTTL = 5'u32
waitInterval = messageInterval + 150.milliseconds
# NOTE(review): despite the "Ms" suffix this is a chronos Duration, not a
# raw millisecond count.
conditionTimeoutMs = 3000.milliseconds
proc resetMessageQueues(nodes: varargs[EthereumNode]) =
  ## Clear the protocol message queue of every given node.
  for n in nodes:
    n.resetMessageQueue()
# check on a condition until true or return a future containing false
# if timeout expires first
proc eventually(timeout: Duration, condition: proc(): bool {.gcsafe.}):
    Future[bool] =
  # The original allocated an extra Future and completed it manually before
  # awaiting it; the async proc's own future already serves that purpose, so
  # simply return true once the condition holds.
  let wrappedCondition = proc(): Future[bool] {.async.} =
    while not condition():
      # Poll every 100 ms until the condition is met.
      await sleepAsync(100.milliseconds)
    return true
  return withTimeout(wrappedCondition(), timeout)
suite "Waku connections":
# End-to-end tests between in-process nodes over real local rlpx connections.
asyncTest "Waku connections":
# n1/n2 advertise an empty topic list, n3/n4 advertise no topic list at
# all; the checks below show two topic-advertising nodes (p1) refuse each
# other while the other pairings connect.
var
n1 = setupTestNode(Waku)
n2 = setupTestNode(Waku)
n3 = setupTestNode(Waku)
n4 = setupTestNode(Waku)
var topics: seq[Topic]
n1.protocolState(Waku).config.topics = some(topics)
n2.protocolState(Waku).config.topics = some(topics)
n3.protocolState(Waku).config.topics = none(seq[Topic])
n4.protocolState(Waku).config.topics = none(seq[Topic])
n1.startListening()
n3.startListening()
let
p1 = await n2.rlpxConnect(newNode(n1.toENode()))
p2 = await n2.rlpxConnect(newNode(n3.toENode()))
p3 = await n4.rlpxConnect(newNode(n3.toENode()))
check:
p1.isNil
p2.isNil == false
p3.isNil == false
asyncTest "Filters with encryption and signing":
# All four encryption/signing combinations posted by node2 must be
# delivered to the matching filters on node1.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let encryptKeyPair = KeyPair.random()[]
let signKeyPair = KeyPair.random()[]
# Zero-initialized symmetric key; both sides use the same value.
var symKey: SymKey
let topic = [byte 0x12, 0, 0, 0]
var filters: seq[string] = @[]
var payloads = [repeat(byte 1, 10), repeat(byte 2, 10),
repeat(byte 3, 10), repeat(byte 4, 10)]
var futures = [newFuture[int](), newFuture[int](),
newFuture[int](), newFuture[int]()]
# handler1/handler3 fire twice (signed messages also match the unsigned
# filters); the {.global.} counter completes the future on the 2nd hit.
proc handler1(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[0] or
msg.decoded.payload == payloads[1]
count += 1
if count == 2: futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
proc handler3(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[2] or
msg.decoded.payload == payloads[3]
count += 1
if count == 2: futures[2].complete(1)
proc handler4(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[3]
futures[3].complete(1)
# Filters
# filter for encrypted asym
filters.add(node1.subscribeFilter(initFilter(
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler1))
# filter for encrypted asym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler2))
# filter for encrypted sym
filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey),
topics = @[topic]), handler3))
# filter for encrypted sym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
symKey = some(symKey), topics = @[topic]), handler4))
# Messages
check:
# encrypted asym
node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL,
topic = topic, payload = payloads[0]) == true
# encrypted asym + signed
node2.postMessage(some(encryptKeyPair.pubkey),
src = some(signKeyPair.seckey), ttl = safeTTL,
topic = topic, payload = payloads[1]) == true
# encrypted sym
node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic,
payload = payloads[2]) == true
# encrypted sym + signed
node2.postMessage(symKey = some(symKey),
src = some(signKeyPair.seckey),
ttl = safeTTL, topic = topic,
payload = payloads[3]) == true
node2.protocolState(Waku).queue.items.len == 4
check:
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 4
for filter in filters:
check node1.unsubscribeFilter(filter) == true
asyncTest "Filters with topics":
# Messages are routed to the filter whose topic matches.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic1 = [byte 0x12, 0, 0, 0]
let topic2 = [byte 0x34, 0, 0, 0]
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[0]
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1)
var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2)
check:
node2.postMessage(ttl = safeTTL + 1, topic = topic1,
payload = payloads[0]) == true
node2.postMessage(ttl = safeTTL, topic = topic2,
payload = payloads[1]) == true
node2.protocolState(Waku).queue.items.len == 2
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 2
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with PoW":
# A filter with an unreachably high PoW requirement never fires, while a
# zero-requirement filter on the same topic does.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0x12, 0, 0, 0]
var payload = repeat(byte 0, 10)
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[1].complete(1)
var filter1 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 0), handler1)
var filter2 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 1_000_000), handler2)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
(await futures[0].withTimeout(waitInterval)) == true
(await futures[1].withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with queues":
# getFilterMessages drains the filter queue: 10 messages on first read,
# none on the second.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
for i in countdown(10, 1):
check node2.postMessage(ttl = safeTTL, topic = topic,
payload = payload) == true
await sleepAsync(waitInterval)
check:
node1.getFilterMessages(filter).len() == 10
node1.getFilterMessages(filter).len() == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Local filter notify":
# Messages posted by a node land in its own matching filters immediately.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
check:
node1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 4, 10)) == true
node1.getFilterMessages(filter).len() == 1
node1.unsubscribeFilter(filter) == true
asyncTest "Bloomfilter blocking":
# With the bloom set from node1's filters, a non-matching topic is dropped
# before reaching node1; a matching topic passes.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let sendTopic1 = [byte 0x12, 0, 0, 0]
let sendTopic2 = [byte 0x34, 0, 0, 0]
let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]]
let payload = repeat(byte 0, 10)
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
var filter = node1.subscribeFilter(
initFilter(topics = filterTopics), handler)
await node1.setBloomFilter(node1.filtersToBloom())
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic1,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
(await f.withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
f = newFuture[int]()
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic2,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter) == true
# Restore the permissive bloom so later tests are unaffected.
await node1.setBloomFilter(fullBloom())
asyncTest "PoW blocking":
# A high advertised PoW requirement keeps low-PoW messages out of node1's
# queue; lowering it back to 0 lets them through.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
await node1.setPowRequirement(1_000_000)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
await node1.setPowRequirement(0.0)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 1
asyncTest "Queue pruning":
# Expired envelopes must be pruned from both queues.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
# We need a minimum TTL of 2 as when set to 1 there is a small chance that
# it is already expired after messageInterval due to rounding down of float
# to uint32 in postMessage()
let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire
for i in countdown(10, 1):
check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload)
check node2.protocolState(Waku).queue.items.len == 10
await sleepAsync(waitInterval)
check node1.protocolState(Waku).queue.items.len == 10
await sleepAsync(milliseconds((lowerTTL+1)*1000))
check node1.protocolState(Waku).queue.items.len == 0
check node2.protocolState(Waku).queue.items.len == 0
asyncTest "P2P post":
# Direct (p2p) posts to a trusted peer bypass the relay queue entirely.
var node1 = setupTestNode(Waku)
var node2 = setupTestNode(Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == repeat(byte 4, 10)
f.complete(1)
var filter = node1.subscribeFilter(initFilter(topics = @[topic],
allowP2P = true), handler)
# Need to be sure that node1 is added in the peerpool of node2 as
# postMessage with target will iterate over the peers
require await eventually(conditionTimeoutMs,
proc(): bool = node2.peerPool.len == 1)
check:
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true
node2.postMessage(ttl = 10, topic = topic,
payload = repeat(byte 4, 10),
targetPeer = some(toNodeId(node1.keys.pubkey))) == true
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 0
node2.protocolState(Waku).queue.items.len == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Light node posting":
var ln = setupTestNode(Waku)
await ln.setLightNode(true)
var fn = setupTestNode(Waku)
fn.startListening()
await ln.peerPool.connectToNode(newNode(fn.toENode()))
let topic = [byte 0, 0, 0, 0]
check:
ln.peerPool.connectedNodes.len() == 1
# normal post
ln.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 0, 10)) == true
ln.protocolState(Waku).queue.items.len == 1
# TODO: add test on message relaying
asyncTest "Connect two light nodes":
# Two light nodes must refuse to peer with each other.
var ln1 = setupTestNode(Waku)
var ln2 = setupTestNode(Waku)
await ln1.setLightNode(true)
await ln2.setLightNode(true)
ln2.startListening()
let peer = await ln1.rlpxConnect(newNode(ln2.toENode()))
check peer.isNil == true
asyncTest "Waku set-topic-interest":
# After updating topic interest at runtime, only messages on the
# registered topics are accepted.
var
wakuTopicNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# Set one topic so we are not considered a full node
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update topic interest
check:
await setTopicInterest(wakuTopicNode, @[topic1, topic2])
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2
asyncTest "Waku set-minimum-pow":
var
wakuPowNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
wakuNode.startListening()
await wakuPowNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update minimum pow
await setPowRequirement(wakuPowNode, 1.0)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check powRequirement is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).powRequirement == 1.0
asyncTest "Waku set-light-node":
var
wakuLightNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
wakuNode.startListening()
await wakuLightNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update light-node flag at runtime and verify the peer picked it up.
await setLightNode(wakuLightNode, true)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check lightNode is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).isLightNode
asyncTest "Waku set-bloom-filter":
# Setting a bloom filter clears any previously advertised topic interest.
var
wakuBloomNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
bloom = fullBloom()
topics = @[[byte 0xDA, 0xDA, 0xDA, 0xAA]]
# Set topic interest
discard await wakuBloomNode.setTopicInterest(topics)
wakuBloomNode.startListening()
await wakuNode.peerPool.connectToNode(newNode(wakuBloomNode.toENode()))
# Sanity check
check:
wakuNode.peerPool.len == 1
# check bloom filter is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).bloom == bloom
peer.state(Waku).topics == some(topics)
let hasBloomNodeConnectedCondition = proc(): bool =
wakuBloomNode.peerPool.len == 1
# wait for the peer to be connected on the other side
let hasBloomNodeConnected =
await eventually(conditionTimeoutMs, hasBloomNodeConnectedCondition)
# check bloom filter is updated
check:
hasBloomNodeConnected
# disable one bit in the bloom filter
bloom[0] = 0x0
# and set it
await setBloomFilter(wakuBloomNode, bloom)
let bloomFilterUpdatedCondition = proc(): bool =
for peer in wakuNode.peerPool.peers:
return peer.state(Waku).bloom == bloom and
peer.state(Waku).topics == none(seq[Topic])
let bloomFilterUpdated =
await eventually(conditionTimeoutMs, bloomFilterUpdatedCondition)
# check bloom filter is updated
check:
bloomFilterUpdated
asyncTest "Waku topic-interest":
# Topic interest advertised via the handshake config filters messages.
var
wakuTopicNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await eventually(conditionTimeoutMs,
proc (): bool = wakuTopicNode.protocolState(Waku).queue.items.len == 2)
asyncTest "Waku topic-interest versus bloom filter":
# When both topic interest and a bloom filter are configured, topic
# interest takes precedence: only the interest topics get through.
var
wakuTopicNode = setupTestNode(Waku)
wakuNode = setupTestNode(Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
bloomTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# It was checked that the topics don't trigger false positives on the bloom.
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuTopicNode.protocolState(Waku).config.bloom = some(toBloom([bloomTopic]))
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = bloomTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2

116
tests/test_waku_mail.nim Normal file
View File

@ -0,0 +1,116 @@
import
unittest, chronos, tables, sequtils, times,
eth/[p2p, async_utils], eth/p2p/peer_pool,
../waku/protocol/v1/[waku_protocol, waku_mail],
./test_helpers
# Maximum time allowed for a single local round trip in these tests.
const
transmissionTimeout = chronos.milliseconds(100)
proc waitForConnected(node: EthereumNode) {.async.} =
  ## Poll (asynchronously) until `node` has at least one connected peer.
  while true:
    if node.peerPool.connectedNodes.len > 0:
      break
    await sleepAsync(chronos.milliseconds(1))
procSuite "Waku Mail Client":
# `client` connects to `simpleServer`, which plays the mailserver role.
var client = setupTestNode(Waku)
var simpleServer = setupTestNode(Waku)
simpleServer.startListening()
let simpleServerNode = newNode(simpleServer.toENode())
let clientNode = newNode(client.toENode())
waitFor client.peerPool.connectToNode(simpleServerNode)
require:
waitFor simpleServer.waitForConnected().withTimeout(transmissionTimeout)
asyncTest "Two peers connected":
check:
client.peerPool.connectedNodes.len() == 1
simpleServer.peerPool.connectedNodes.len() == 1
asyncTest "Mail Request and Request Complete":
# Client sends a p2pRequest with an encrypted MailRequest payload; this
# test decodes it on the server side and answers with p2pRequestComplete.
let
topic = [byte 0, 0, 0, 0]
bloom = toBloom(@[topic])
lower = 0'u32
upper = epochTime().uint32
limit = 100'u32
request = MailRequest(lower: lower, upper: upper, bloom: @bloom,
limit: limit)
var symKey: SymKey
check client.setPeerTrusted(simpleServerNode.id)
var cursorFut = client.requestMail(simpleServerNode.id, request, symKey, 1)
# Simple mailserver part
let peer = simpleServer.peerPool.connectedNodes[clientNode]
var f = peer.nextMsg(Waku.p2pRequest)
require await f.withTimeout(transmissionTimeout)
let response = f.read()
let decoded = decode(response.envelope.data, symKey = some(symKey))
require decoded.isSome()
# The request payload is RLP-encoded inside the envelope.
var rlp = rlpFromBytes(decoded.get().payload)
let output = rlp.read(MailRequest)
check:
output.lower == lower
output.upper == upper
output.bloom == bloom
output.limit == limit
var dummy: Hash
await peer.p2pRequestComplete(dummy, dummy, @[])
check await cursorFut.withTimeout(transmissionTimeout)
asyncTest "Mail Send":
# Direct p2pMessage delivery to a trusted peer, regardless of TTL/PoW.
let topic = [byte 0x12, 0x34, 0x56, 0x78]
let payload = repeat(byte 0, 10)
var f = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
let filter = subscribeFilter(client,
initFilter(topics = @[topic], allowP2P = true), handler)
check:
client.setPeerTrusted(simpleServerNode.id)
# ttl 0 to show that ttl should be ignored
# TODO: perhaps not the best way to test this, means no PoW calculation
# may be done, and not sure if that is OK?
simpleServer.postMessage(ttl = 0, topic = topic, payload = payload,
targetPeer = some(clientNode.id))
await f.withTimeout(transmissionTimeout)
client.unsubscribeFilter(filter)
asyncTest "Multiple Client Request and Complete":
# Server-side cursor pagination: the custom handler replies with a
# shrinking cursor until it is empty; requestMail must follow it for all
# 5 pages.
var count = 5
proc customHandler(peer: Peer, envelope: Envelope)=
var envelopes: seq[Envelope]
traceAsyncErrors peer.p2pMessage(envelopes)
var cursor: seq[byte]
count = count - 1
if count == 0:
cursor = @[]
else:
cursor = @[byte count]
var dummy: Hash
traceAsyncErrors peer.p2pRequestComplete(dummy, dummy, cursor)
simpleServer.enableMailServer(customHandler)
check client.setPeerTrusted(simpleServerNode.id)
var request: MailRequest
var symKey: SymKey
let cursor =
await client.requestMail(simpleServerNode.id, request, symKey, 5)
require cursor.isSome()
check:
cursor.get().len == 0
count == 0
# TODO: Also check for received envelopes.

1
vendor/news vendored Submodule

@ -0,0 +1 @@
Subproject commit 8ea2ee260207c57a4b495931508569e181bb7b93

1
vendor/nim-bearssl vendored Submodule

@ -0,0 +1 @@
Subproject commit 68c6d27304245c948526487b37e10951acf7dbc8

1
vendor/nim-chronicles vendored Submodule

@ -0,0 +1 @@
Subproject commit af184ae47e20672b68d20e7cacd3b726533548e1

1
vendor/nim-chronos vendored Submodule

@ -0,0 +1 @@
Subproject commit 357baa52a0ea3da699c8229d97230c7f4340da92

1
vendor/nim-confutils vendored Submodule

@ -0,0 +1 @@
Subproject commit 6e5d570490989c753d4645ba9173ef9358d302bb

1
vendor/nim-faststreams vendored Submodule

@ -0,0 +1 @@
Subproject commit caab5c917ffd4a4c583dad9c59d2e1012eee9baa

1
vendor/nim-http-utils vendored Submodule

@ -0,0 +1 @@
Subproject commit 33d70b9f378591e074838d6608a4468940137357

1
vendor/nim-json-rpc vendored Submodule

@ -0,0 +1 @@
Subproject commit 5c0d0961114bcaaf3da52d5918bf0b85ef0e4ce9

1
vendor/nim-json-serialization vendored Submodule

@ -0,0 +1 @@
Subproject commit cb695d175fd809f6262f1ebc329e7c0b1d4505f5

1
vendor/nim-libbacktrace vendored Submodule

@ -0,0 +1 @@
Subproject commit fc63983c73ba541413f12ee70a2b6f025b03efea

1
vendor/nim-metrics vendored Submodule

@ -0,0 +1 @@
Subproject commit 65c91f1a7b5bf83968d0bebab74d14ddb6e9a432

1
vendor/nim-nat-traversal vendored Submodule

@ -0,0 +1 @@
Subproject commit 2403c33929c74f2d150f50dc8bc3a598af70661a

1
vendor/nim-serialization vendored Submodule

@ -0,0 +1 @@
Subproject commit 9f085c33a9dc381bcf455225ccfad893b3d0361e

1
vendor/nim-stint vendored Submodule

@ -0,0 +1 @@
Subproject commit 9e49b00148884a01d61478ae5d2c69b543b93ceb

1
vendor/nimbus vendored

@ -1 +0,0 @@
Subproject commit ff028982d6fc414c17d98d4c5e6d6d3856c0c598

1
vendor/nimbus-build-system vendored Submodule

@ -0,0 +1 @@
Subproject commit e8e1f1c2cff5aebf74dff07b1cf119134267922b

1
vendor/nimcrypto vendored Submodule

@ -0,0 +1 @@
Subproject commit 30d0ceaba02c0b966515f98873a0404786fbf796

57
waku.nimble Normal file
View File

@ -0,0 +1,57 @@
mode = ScriptMode.Verbose

### Package
version       = "0.1.0"
author        = "Status Research & Development GmbH"
# NOTE: fixed typo "Rerestricted" -> "Restricted"
description   = "Waku, Private P2P Messaging for Resource-Restricted Devices"
license       = "MIT or Apache License 2.0"
srcDir        = "src"
#bin           = @["build/waku"]

### Dependencies
requires "nim >= 1.2.0",
  "chronicles",
  "confutils",
  "chronos",
  "eth",
  "json_rpc",
  "libbacktrace",
  "nimcrypto",
  "stew",
  "stint",
  "metrics",
  "libp2p" # For wakunode v2
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
# Compile `srcDir & name & ".nim"` into build/<name>, forwarding any extra
# command-line parameters to the Nim compiler.
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
# NOTE(review): `2..<paramCount()` excludes the last CLI parameter; this
# matches the pattern used in sibling nimbus repos, but confirm the final
# param is intentionally skipped (e.g. the script name).
for i in 2..<paramCount():
extra_params &= " " & paramStr(i)
exec "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
proc test(name: string, lang = "c") =
  ## Build the given test binary from tests/ with errors-only logging,
  ## then execute it.
  buildBinary(name, "tests/", "-d:chronicles_log_level=ERROR")
  exec("build/" & name)
### Tasks
task test, "Run tests":
test "all_tests"

# v1 node and simulation tools
task wakunode, "Build Waku cli":
buildBinary "wakunode", "waku/node/v1/", "-d:chronicles_log_level=TRACE"
task wakusim, "Build Waku simulation tools":
buildBinary "quicksim", "waku/node/v1/", "-d:chronicles_log_level=INFO"
buildBinary "start_network", "waku/node/v1/", "-d:chronicles_log_level=DEBUG"

# v2 (experimental) node and simulation tools
task protocol2, "Build the experimental Waku protocol":
buildBinary "waku_protocol", "waku/protocol/v2/", "-d:chronicles_log_level=DEBUG"
task wakunode2, "Build Experimental Waku cli":
buildBinary "wakunode", "waku/node/v2/", "-d:chronicles_log_level=TRACE"
task wakusim2, "Build Experimental Waku simulation tools":
buildBinary "quicksim", "waku/node/v2/", "-d:chronicles_log_level=INFO"

5
waku/node/README.md Normal file
View File

@ -0,0 +1,5 @@
# Waku Node
TODO.
See README in `v1` folder for instructions on how to run a node.

View File

@ -1,8 +1,9 @@
# Introduction
`wakunode` is a cli application that allows you to run a
[Waku](https://github.com/vacp2p/specs/blob/master/waku.md) enabled node.
[Waku](https://specs.vac.dev/waku/waku.html) enabled node.
The application and Waku specification are still experimental and fully in flux.
The Waku specification is still in draft and thus this implementation will
change accordingly.
Additionally the original Whisper (EIP-627) protocol can also be enabled as can
an experimental Whisper - Waku bridging option.
@ -19,11 +20,15 @@ More information on the installation of these can be found [here](https://github
## Build & Run
```bash
make # The first `make` invocation will update all Git submodules and prompt you to run `make` again.
# It's only required once per Git clone. You'll run `make update` after each `git pull`, in the future,
# to keep those submodules up to date.
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make wakunode
# See available command line options
./build/wakunode --help
# Connect the client directly with the Status test fleet
./build/wakunode --log-level:debug --discovery:off --fleet:test --log-metrics
```
# Using Metrics
@ -87,9 +92,9 @@ This dashboard can be found at `./waku/metrics/waku-sim-all-nodes-grafana-dashbo
# Spec support
*This section last updated April 7, 2020*
*This section last updated April 21, 2020*
This client of Waku is spec compliant with [Waku spec v0.4](https://specs.vac.dev/waku/waku.html).
This client of Waku is spec compliant with [Waku spec v1.0](https://specs.vac.dev/waku/waku.html).
It doesn't yet implement the following recommended features:
- No support for rate limiting

View File

@ -1,6 +1,5 @@
import
confutils/defs, chronicles, chronos,
eth/keys, eth/p2p/rlpx_protocols/waku_protocol
confutils/defs, chronicles, chronos, eth/keys
type
Fleet* = enum

View File

@ -9,8 +9,8 @@ RUN apt-get -qq update \
ARG GIT_REVISION
RUN cd /root \
&& git clone https://github.com/status-im/nimbus.git \
&& cd nimbus \
&& git clone https://github.com/status-im/nim-waku.git \
&& cd nim-waku \
&& git reset --hard ${GIT_REVISION} \
&& { make &>/dev/null || true; } \
&& make -j$(nproc) update \
@ -28,7 +28,7 @@ RUN apt-get -qq update \
&& apt-get -qq clean \
&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
COPY --from=build /root/nimbus/build/wakunode /usr/bin/wakunode
COPY --from=build /root/nim-waku/build/wakunode /usr/bin/wakunode
MAINTAINER Kim De Mey <kimdemey@status.im>
LABEL description="Wakunode: Waku and Whisper client"

View File

@ -0,0 +1,24 @@
global:
scrape_interval: 1s
scrape_configs:
- job_name: "wakusim"
static_configs:
- targets: ['127.0.0.1:8010']
labels:
node: '0'
- targets: ['127.0.0.1:8011']
labels:
node: '1'
- targets: ['127.0.0.1:8012']
labels:
node: '2'
- targets: ['127.0.0.1:8013']
labels:
node: '3'
- targets: ['127.0.0.1:8008']
labels:
node: '4'
- targets: ['127.0.0.1:8009']
labels:
node: '5'

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
import
os, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
eth/common as eth_common, eth/keys, eth/p2p/rlpx_protocols/waku_protocol,
../../vendor/nimbus/nimbus/rpc/[hexstrings, rpc_types, waku],
eth/common as eth_common, eth/keys,
../../protocol/v1/waku_protocol, ./rpc/[hexstrings, rpc_types],
options as what # TODO: Huh? Redefinition?
from os import DirSep
@ -33,18 +33,18 @@ let
symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
topics = generateTopics()
symKeyID = waitFor lightNode.waku_addSymKey(symKey)
options = WhisperFilterOptions(symKeyID: some(symKeyID),
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(topics))
filterID = waitFor lightNode.waku_newMessageFilter(options)
symKeyID2 = waitFor lightNode2.waku_addSymKey(symKey)
options2 = WhisperFilterOptions(symKeyID: some(symKeyID2),
options2 = WakuFilterOptions(symKeyID: some(symKeyID2),
topics: some(topics))
filterID2 = waitFor lightNode2.waku_newMessageFilter(options2)
symkeyID3 = waitFor trafficNode.waku_addSymKey(symKey)
var message = WhisperPostMessage(symKeyID: some(symkeyID3),
var message = WakuPostMessage(symKeyID: some(symkeyID3),
ttl: 30,
topic: some(topics[0]),
payload: "0x45879632".HexDataStr,

View File

@ -0,0 +1,225 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## This module implements the Ethereum hexadecimal string formats for JSON
## See: https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
#[
Note:
The following types are converted to hex strings when marshalled to JSON:
* Hash256
* UInt256
* seq[byte]
* openArray[seq]
* PublicKey
* PrivateKey
* SymKey
* Topic
* Bytes
]#
import
stint, stew/byteutils, eth/[keys, rlp], eth/common/eth_types,
../../../protocol/v1/waku_protocol
type
HexDataStr* = distinct string
Identifier* = distinct string # 32 bytes, no 0x prefix!
HexStrings = HexDataStr | Identifier
# Hex validation
# Hex validation

template hasHexHeader(value: string): bool =
  ## True when `value` starts with "0x" or "0X".
  if value.len >= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true
  else: false

template isHexChar(c: char): bool =
  ## True when `c` is a hexadecimal digit (0-9, a-f, A-F).
  if c notin {'0'..'9'} and
     c notin {'a'..'f'} and
     c notin {'A'..'F'}: false
  else: true

func isValidHexQuantity*(value: string): bool =
  ## Validates a "0x"-prefixed hex quantity per the Ethereum JSON-RPC
  ## encoding: at least one digit, and no leading zero digit ("0x0" is the
  ## only value allowed to start with '0').
  if not value.hasHexHeader:
    return false
  # No leading zeros (but allow 0x0)
  if value.len < 3 or (value.len > 3 and value[2] == '0'): return false
  for i in 2 ..< value.len:
    let c = value[i]
    if not c.isHexChar:
      return false
  return true

func isValidHexData*(value: string, header = true): bool =
  ## Validates hex data: an even number of hex digits, by default prefixed
  ## with "0x". Leading zeros are allowed.
  if header and not value.hasHexHeader:
    return false
  # Must be even number of digits
  if value.len mod 2 != 0: return false
  # Leading zeros are allowed.
  # Fix: when no "0x" header is expected, validation must start at index 0;
  # previously the loop always started at index 2, leaving the first two
  # characters of unprefixed data (e.g. identifiers) unchecked.
  let start = if header: 2 else: 0
  for i in start ..< value.len:
    let c = value[i]
    if not c.isHexChar:
      return false
  return true

template isValidHexData(value: string, hexLen: int, header = true): bool =
  ## Exact-length variant of `isValidHexData`.
  value.len == hexLen and value.isValidHexData(header)

func isValidIdentifier*(value: string): bool =
  # 32 bytes for a key/filter identifier, no 0x prefix
  result = value.isValidHexData(64, false)

func isValidPublicKey*(value: string): bool =
  # 65 bytes for Public Key plus 1 byte for 0x prefix
  result = value.isValidHexData(132)

func isValidPrivateKey*(value: string): bool =
  # 32 bytes for Private Key plus 1 byte for 0x prefix
  result = value.isValidHexData(66)

func isValidSymKey*(value: string): bool =
  # 32 bytes for Symmetric Key plus 1 byte for 0x prefix
  result = value.isValidHexData(66)

func isValidHash256*(value: string): bool =
  # 32 bytes for Hash256 plus 1 byte for 0x prefix
  result = value.isValidHexData(66)

func isValidTopic*(value: string): bool =
  # 4 bytes for Topic plus 1 byte for 0x prefix
  result = value.isValidHexData(10)
const
  SInvalidData = "Invalid hex data format for Ethereum"

proc validateHexData*(value: string) {.inline.} =
  ## Raises ValueError when `value` is not valid "0x"-prefixed hex data.
  if likely(value.isValidHexData):
    return
  raise newException(ValueError, SInvalidData & ": " & value)
# Initialisation
proc hexDataStr*(value: string): HexDataStr {.inline.} =
  ## Validates `value` as hex data and wraps it in the HexDataStr
  ## distinct type; raises ValueError on invalid input.
  value.validateHexData()
  HexDataStr(value)
# Converters for use in RPC
import json
from json_rpc/rpcserver import expect
proc `%`*(value: HexStrings): JsonNode =
result = %(value.string)
# Overloads to support expected representation of hex data
proc `%`*(value: Hash256): JsonNode =
#result = %("0x" & $value) # More clean but no lowercase :(
result = %("0x" & value.data.toHex)
proc `%`*(value: UInt256): JsonNode =
result = %("0x" & value.toString(16))
proc `%`*(value: PublicKey): JsonNode =
result = %("0x04" & $value)
proc `%`*(value: PrivateKey): JsonNode =
result = %("0x" & $value)
proc `%`*(value: SymKey): JsonNode =
result = %("0x" & value.toHex)
proc `%`*(value: waku_protocol.Topic): JsonNode =
result = %("0x" & value.toHex)
proc `%`*(value: seq[byte]): JsonNode =
result = %("0x" & value.toHex)
# Helpers for the fromJson procs
proc toPublicKey*(key: string): PublicKey {.inline.} =
result = PublicKey.fromHex(key[4 .. ^1]).tryGet()
proc toPrivateKey*(key: string): PrivateKey {.inline.} =
result = PrivateKey.fromHex(key[2 .. ^1]).tryGet()
proc toSymKey*(key: string): SymKey {.inline.} =
hexToByteArray(key[2 .. ^1], result)
proc toTopic*(topic: string): waku_protocol.Topic {.inline.} =
hexToByteArray(topic[2 .. ^1], result)
# Marshalling from JSON to Nim types that includes format checking
func invalidMsg(name: string): string = "When marshalling from JSON, parameter \"" & name & "\" is not valid"
proc fromJson*(n: JsonNode, argName: string, result: var HexDataStr) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHexData:
raise newException(ValueError, invalidMsg(argName) & " as Ethereum data \"" & hexStr & "\"")
result = hexStr.hexDataStr
proc fromJson*(n: JsonNode, argName: string, result: var Identifier) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidIdentifier:
raise newException(ValueError, invalidMsg(argName) & " as a identifier \"" & hexStr & "\"")
result = hexStr.Identifier
proc fromJson*(n: JsonNode, argName: string, result: var UInt256) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not (hexStr.len <= 66 and hexStr.isValidHexQuantity):
raise newException(ValueError, invalidMsg(argName) & " as a UInt256 \"" & hexStr & "\"")
result = readUintBE[256](hexToPaddedByteArray[32](hexStr))
proc fromJson*(n: JsonNode, argName: string, result: var PublicKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidPublicKey:
raise newException(ValueError, invalidMsg(argName) & " as a public key \"" & hexStr & "\"")
result = hexStr.toPublicKey
proc fromJson*(n: JsonNode, argName: string, result: var PrivateKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidPrivateKey:
raise newException(ValueError, invalidMsg(argName) & " as a private key \"" & hexStr & "\"")
result = hexStr.toPrivateKey
proc fromJson*(n: JsonNode, argName: string, result: var SymKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidSymKey:
raise newException(ValueError, invalidMsg(argName) & " as a symmetric key \"" & hexStr & "\"")
result = toSymKey(hexStr)
proc fromJson*(n: JsonNode, argName: string, result: var waku_protocol.Topic) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidTopic:
raise newException(ValueError, invalidMsg(argName) & " as a topic \"" & hexStr & "\"")
result = toTopic(hexStr)
# Following procs currently required only for testing, the `createRpcSigs` macro
# requires it as it will convert the JSON results back to the original Nim
# types, but it needs the `fromJson` calls for those specific Nim types to do so
proc fromJson*(n: JsonNode, argName: string, result: var seq[byte]) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHexData:
raise newException(ValueError, invalidMsg(argName) & " as a hex data \"" & hexStr & "\"")
result = hexToSeqByte(hexStr)
proc fromJson*(n: JsonNode, argName: string, result: var Hash256) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHash256:
raise newException(ValueError, invalidMsg(argName) & " as a Hash256 \"" & hexStr & "\"")
hexToByteArray(hexStr, result.data)

View File

@ -0,0 +1,22 @@
#
# Nimbus
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import tables, eth/keys, eth/p2p/rlpx_protocols/whisper/whisper_types
type
  KeyStorage* = ref object
    ## In-memory store for asymmetric key pairs and symmetric keys,
    ## addressed by string identifiers.
    asymKeys*: Table[string, KeyPair]
    symKeys*: Table[string, SymKey]

  KeyGenerationError* = object of CatchableError

proc newKeyStorage*(): KeyStorage =
  ## Allocates a key storage with empty key tables.
  KeyStorage(asymKeys: initTable[string, KeyPair](),
             symKeys: initTable[string, SymKey]())

View File

@ -0,0 +1,58 @@
import
hexstrings, options, eth/[keys, rlp],
../../../protocol/v1/waku_protocol
#[
Notes:
* Some of the types suppose 'null' when there is no appropriate value.
To allow for this, you can use Option[T] or use refs so the JSON transform can convert to `JNull`.
* Parameter objects from users must have their data verified so will use EthAddressStr instead of EthAddres, for example
* Objects returned to the user can use native Waku types, where hexstrings provides converters to hex strings.
This is because returned arrays in JSON is
a) not an efficient use of space
b) not the format the user expects (for example addresses are expected to be hex strings prefixed by "0x")
]#
type
WakuInfo* = object
# Returned to user
minPow*: float64 # Current minimum PoW requirement.
# TODO: may be uint32
maxMessageSize*: uint64 # Current message size limit in bytes.
memory*: int # Memory size of the floating messages in bytes.
messages*: int # Number of floating messages.
WakuFilterOptions* = object
# Parameter from user
symKeyID*: Option[Identifier] # ID of symmetric key for message decryption.
privateKeyID*: Option[Identifier] # ID of private (asymmetric) key for message decryption.
sig*: Option[PublicKey] # (Optional) Public key of the signature.
minPow*: Option[float64] # (Optional) Minimal PoW requirement for incoming messages.
topics*: Option[seq[waku_protocol.Topic]] # (Optional when asym key): Array of possible topics (or partial topics).
allowP2P*: Option[bool] # (Optional) Indicates if this filter allows processing of direct peer-to-peer messages.
WakuFilterMessage* = object
# Returned to user
sig*: Option[PublicKey] # Public key who signed this message.
recipientPublicKey*: Option[PublicKey] # The recipients public key.
ttl*: uint64 # Time-to-live in seconds.
timestamp*: uint64 # Unix timestamp of the message generation.
topic*: waku_protocol.Topic # 4 Bytes: Message topic.
payload*: seq[byte] # Decrypted payload.
padding*: seq[byte] # (Optional) Padding (byte array of arbitrary length).
pow*: float64 # Proof of work value.
hash*: Hash # Hash of the enveloped message.
WakuPostMessage* = object
# Parameter from user
symKeyID*: Option[Identifier] # ID of symmetric key for message encryption.
pubKey*: Option[PublicKey] # Public key for message encryption.
sig*: Option[Identifier] # (Optional) ID of the signing key.
ttl*: uint64 # Time-to-live in seconds.
topic*: Option[waku_protocol.Topic] # Message topic (mandatory when key is symmetric).
payload*: HexDataStr # Payload to be encrypted.
padding*: Option[HexDataStr] # (Optional) Padding (byte array of arbitrary length).
powTime*: float64 # Maximal time in seconds to be spent on proof of work.
powTarget*: float64 # Minimal PoW target required for this message.
# TODO: EnodeStr
targetPeer*: Option[string] # (Optional) Peer ID (for peer-to-peer message only).

364
waku/node/v1/rpc/waku.nim Normal file
View File

@ -0,0 +1,364 @@
import
json_rpc/rpcserver, tables, options, sequtils,
eth/[common, rlp, keys, p2p],
nimcrypto/[sysrand, hmac, sha2, pbkdf2],
rpc_types, hexstrings, key_storage,
../../../protocol/v1/waku_protocol
from stew/byteutils import hexToSeqByte, hexToByteArray
# Blatant copy of Whisper RPC but for the Waku protocol
proc setupWakuRPC*(node: EthereumNode, keys: KeyStorage, rpcsrv: RpcServer) =
rpcsrv.rpc("waku_version") do() -> string:
## Returns string of the current Waku protocol version.
result = wakuVersionStr
rpcsrv.rpc("waku_info") do() -> WakuInfo:
## Returns diagnostic information about the Waku node.
let config = node.protocolState(Waku).config
result = WakuInfo(minPow: config.powRequirement,
maxMessageSize: config.maxMsgSize,
memory: 0,
messages: 0)
# TODO: uint32 instead of uint64 is OK here, but needs to be added in json_rpc
rpcsrv.rpc("waku_setMaxMessageSize") do(size: uint64) -> bool:
## Sets the maximal message size allowed by this node.
## Incoming and outgoing messages with a larger size will be rejected.
## Waku message size can never exceed the limit imposed by the underlying
## P2P protocol (10 Mb).
##
## size: Message size in bytes.
##
## Returns true on success and an error on failure.
result = node.setMaxMessageSize(size.uint32)
if not result:
raise newException(ValueError, "Invalid size")
rpcsrv.rpc("waku_setMinPoW") do(pow: float) -> bool:
## Sets the minimal PoW required by this node.
##
## pow: The new PoW requirement.
##
## Returns true on success and an error on failure.
# Note: `setPowRequirement` does not raise on failures of sending the update
# to the peers. Hence in theory this should not causes errors.
await node.setPowRequirement(pow)
result = true
# TODO: change string in to ENodeStr with extra checks
rpcsrv.rpc("waku_markTrustedPeer") do(enode: string) -> bool:
## Marks specific peer trusted, which will allow it to send historic
## (expired) messages.
## Note: This function is not adding new nodes, the node needs to exists as
## a peer.
##
## enode: Enode of the trusted peer.
##
## Returns true on success and an error on failure.
# TODO: It will now require an enode://pubkey@ip:port uri
# could also accept only the pubkey (like geth)?
let peerNode = newNode(enode)
result = node.setPeerTrusted(peerNode.id)
if not result:
raise newException(ValueError, "Not a peer")
rpcsrv.rpc("waku_newKeyPair") do() -> Identifier:
## Generates a new public and private key pair for message decryption and
## encryption.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID().Identifier
keys.asymKeys.add(result.string, KeyPair.random().tryGet())
rpcsrv.rpc("waku_addPrivateKey") do(key: PrivateKey) -> Identifier:
## Stores the key pair, and returns its ID.
##
## key: Private key as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID().Identifier
keys.asymKeys.add(result.string, key.toKeyPair().tryGet())
rpcsrv.rpc("waku_deleteKeyPair") do(id: Identifier) -> bool:
## Deletes the specifies key if it exists.
##
## id: Identifier of key pair
##
## Returns true on success and an error on failure.
var unneeded: KeyPair
result = keys.asymKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_hasKeyPair") do(id: Identifier) -> bool:
## Checks if the Waku node has a private key of a key pair matching the
## given ID.
##
## id: Identifier of key pair
##
## Returns (true or false) on success and an error on failure.
result = keys.asymkeys.hasKey(id.string)
rpcsrv.rpc("waku_getPublicKey") do(id: Identifier) -> PublicKey:
## Returns the public key for identity ID.
##
## id: Identifier of key pair
##
## Returns public key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].pubkey
rpcsrv.rpc("waku_getPrivateKey") do(id: Identifier) -> PrivateKey:
## Returns the private key for identity ID.
##
## id: Identifier of key pair
##
## Returns private key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].seckey
rpcsrv.rpc("waku_newSymKey") do() -> Identifier:
## Generates a random symmetric key and stores it under an ID, which is then
## returned. Can be used encrypting and decrypting messages where the key is
## known to both parties.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID().Identifier
var key: SymKey
if randomBytes(key) != key.len:
raise newException(KeyGenerationError, "Failed generating key")
keys.symKeys.add(result.string, key)
rpcsrv.rpc("waku_addSymKey") do(key: SymKey) -> Identifier:
## Stores the key, and returns its ID.
##
## key: The raw key for symmetric encryption as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID().Identifier
keys.symKeys.add(result.string, key)
rpcsrv.rpc("waku_generateSymKeyFromPassword") do(password: string) -> Identifier:
## Generates the key from password, stores it, and returns its ID.
##
## password: Password.
##
## Returns key identifier on success and an error on failure.
## Warning: an empty string is used as salt because the shh RPC API does not
## allow for passing a salt. A very good password is necessary (calculate
## yourself what that means :))
var ctx: HMAC[sha256]
var symKey: SymKey
if pbkdf2(ctx, password, "", 65356, symKey) != sizeof(SymKey):
raise newException(KeyGenerationError, "Failed generating key")
result = generateRandomID().Identifier
keys.symKeys.add(result.string, symKey)
rpcsrv.rpc("waku_hasSymKey") do(id: Identifier) -> bool:
## Returns true if there is a key associated with the name string.
## Otherwise, returns false.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
result = keys.symkeys.hasKey(id.string)
rpcsrv.rpc("waku_getSymKey") do(id: Identifier) -> SymKey:
## Returns the symmetric key associated with the given ID.
##
## id: Identifier of key.
##
## Returns Raw key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.symkeys[id.string]
rpcsrv.rpc("waku_deleteSymKey") do(id: Identifier) -> bool:
## Deletes the key associated with the name string if it exists.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
var unneeded: SymKey
result = keys.symKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_subscribe") do(id: string,
options: WakuFilterOptions) -> Identifier:
## Creates and registers a new subscription to receive notifications for
## inbound Waku messages. Returns the ID of the newly created
## subscription.
##
## id: identifier of function call. In case of Waku must contain the
## value "messages".
## options: WakuFilterOptions
##
## Returns the subscription ID on success, the error on failure.
# TODO: implement subscriptions, only for WS & IPC?
discard
rpcsrv.rpc("waku_unsubscribe") do(id: Identifier) -> bool:
## Cancels and removes an existing subscription.
##
## id: Subscription identifier
##
## Returns true on success, the error on failure
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
proc validateOptions[T, U, V](asym: Option[T], sym: Option[U], topic: Option[V]) =
  ## Enforces that exactly one of the asymmetric / symmetric key options is
  ## provided, and that a topic accompanies a symmetric key.
  ## Raises ValueError on violation.
  let haveAsym = asym.isSome()
  let haveSym = sym.isSome()
  # Both set or both unset is invalid: exactly one key kind must be chosen.
  if haveAsym == haveSym:
    raise newException(ValueError,
      "Either privateKeyID/pubKey or symKeyID must be present")
  if not haveAsym and topic.isNone():
    raise newException(ValueError, "Topic mandatory with symmetric key")
rpcsrv.rpc("waku_newMessageFilter") do(options: WakuFilterOptions) -> Identifier:
## Create a new filter within the node. This filter can be used to poll for
## new messages that match the set of criteria.
##
## options: WakuFilterOptions
##
## Returns filter identifier on success, error on failure
# Check if either symKeyID or privateKeyID is present, and not both
# Check if there are Topics when symmetric key is used
validateOptions(options.privateKeyID, options.symKeyID, options.topics)
var
src: Option[PublicKey]
privateKey: Option[PrivateKey]
symKey: Option[SymKey]
topics: seq[waku_protocol.Topic]
powReq: float64
allowP2P: bool
src = options.sig
if options.privateKeyID.isSome():
privateKey = some(keys.asymKeys[options.privateKeyID.get().string].seckey)
if options.symKeyID.isSome():
symKey= some(keys.symKeys[options.symKeyID.get().string])
if options.minPow.isSome():
powReq = options.minPow.get()
if options.topics.isSome():
topics = options.topics.get()
if options.allowP2P.isSome():
allowP2P = options.allowP2P.get()
let filter = initFilter(src, privateKey, symKey, topics, powReq, allowP2P)
result = node.subscribeFilter(filter).Identifier
# TODO: Should we do this here "automatically" or separate it in another
# RPC call? Is there a use case for that?
# Same could be said about bloomfilter, except that there is a use case
# there to have a full node no matter what message filters.
# Could also be moved to waku_protocol.nim
let config = node.protocolState(Waku).config
if config.topics.isSome():
try:
# TODO: an addTopics call would probably be more useful
let result = await node.setTopicInterest(config.topics.get().concat(filter.topics))
if not result:
raise newException(ValueError, "Too many topics")
except CatchableError:
trace "setTopics error occured"
elif config.isLightNode:
try:
await node.setBloomFilter(node.filtersToBloom())
except CatchableError:
trace "setBloomFilter error occured"
rpcsrv.rpc("waku_deleteMessageFilter") do(id: Identifier) -> bool:
## Uninstall a message filter in the node.
##
## id: Filter identifier as returned when the filter was created.
##
## Returns true on success, error on failure.
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
rpcsrv.rpc("waku_getFilterMessages") do(id: Identifier) -> seq[WakuFilterMessage]:
## Retrieve messages that match the filter criteria and are received between
## the last time this function was called and now.
##
## id: ID of filter that was created with `waku_newMessageFilter`.
##
## Returns array of messages on success and an error on failure.
let messages = node.getFilterMessages(id.string)
for msg in messages:
result.add WakuFilterMessage(
sig: msg.decoded.src,
recipientPublicKey: msg.dst,
ttl: msg.ttl,
topic: msg.topic,
timestamp: msg.timestamp,
payload: msg.decoded.payload,
# Note: waku_protocol padding is an Option as there is the
# possibility of 0 padding in case of custom padding.
padding: msg.decoded.padding.get(@[]),
pow: msg.pow,
hash: msg.hash)
rpcsrv.rpc("waku_post") do(message: WakuPostMessage) -> bool:
## Creates a Waku message and injects it into the network for
## distribution.
##
## message: Waku message to post.
##
## Returns true on success and an error on failure.
# Check if either symKeyID or pubKey is present, and not both
# Check if there is a Topic when symmetric key is used
validateOptions(message.pubKey, message.symKeyID, message.topic)
var
sigPrivKey: Option[PrivateKey]
symKey: Option[SymKey]
topic: waku_protocol.Topic
padding: Option[seq[byte]]
targetPeer: Option[NodeId]
if message.sig.isSome():
sigPrivKey = some(keys.asymKeys[message.sig.get().string].seckey)
if message.symKeyID.isSome():
symKey = some(keys.symKeys[message.symKeyID.get().string])
# Note: If no topic it will be defaulted to 0x00000000
if message.topic.isSome():
topic = message.topic.get()
if message.padding.isSome():
padding = some(hexToSeqByte(message.padding.get().string))
if message.targetPeer.isSome():
targetPeer = some(newNode(message.targetPeer.get()).id)
result = node.postMessage(message.pubKey,
symKey,
sigPrivKey,
ttl = message.ttl.uint32,
topic = topic,
payload = hexToSeqByte(message.payload.string),
padding = padding,
powTime = message.powTime,
powTarget = message.powTarget,
targetPeer = targetPeer)
if not result:
raise newException(ValueError, "Message could not be posted")

View File

@ -1,5 +1,5 @@
proc waku_version(): string
proc waku_info(): WhisperInfo
proc waku_info(): WakuInfo
proc waku_setMaxMessageSize(size: uint64): bool
proc waku_setMinPoW(pow: float): bool
proc waku_markTrustedPeer(enode: string): bool
@ -18,10 +18,10 @@ proc waku_hasSymKey(id: Identifier): bool
proc waku_getSymKey(id: Identifier): SymKey
proc waku_deleteSymKey(id: Identifier): bool
proc waku_newMessageFilter(options: WhisperFilterOptions): Identifier
proc waku_newMessageFilter(options: WakuFilterOptions): Identifier
proc waku_deleteMessageFilter(id: Identifier): bool
proc waku_getFilterMessages(id: Identifier): seq[WhisperFilterMessage]
proc waku_post(message: WhisperPostMessage): bool
proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage]
proc waku_post(message: WakuPostMessage): bool
proc wakusim_generateTraffic(amount: int): bool
proc wakusim_generateRandomTraffic(amount: int): bool

View File

@ -0,0 +1,32 @@
import
json_rpc/rpcserver, stew/endians2, nimcrypto/sysrand,
eth/[p2p, async_utils],
../../../protocol/v1/waku_protocol
proc generateTraffic(node: EthereumNode, amount = 100) {.async.} =
  ## Posts `amount` Waku messages, one per millisecond, each on a distinct
  ## 4-byte topic derived from the message index (little-endian encoded).
  # Fix: removed unused local `var topicNumber = 0'u32` (never read).
  let payload = @[byte 0]
  for i in 0 ..< amount:
    discard waku_protocol.postMessage(node, ttl = 10,
      topic = toBytesLE(i.uint32), payload = payload)
    await sleepAsync(1.milliseconds)
proc generateRandomTraffic(node: EthereumNode, amount = 100) {.async.} =
  ## Posts `amount` Waku messages, one per millisecond, each on a freshly
  ## generated random 4-byte topic.
  var topic: array[4, byte]
  let payload = @[byte 0]
  for i in 0..<amount:
    # Retry until sysrand delivers a full 4 random bytes.
    while randomBytes(topic) != 4:
      discard
    discard waku_protocol.postMessage(node, ttl = 10, topic = topic,
      payload = payload)
    await sleepAsync(1.milliseconds)
proc setupWakuSimRPC*(node: EthereumNode, rpcsrv: RpcServer) =
  ## Registers the wakusim_* traffic-generation RPC endpoints on `rpcsrv`.
  rpcsrv.rpc("wakusim_generateTraffic") do(amount: int) -> bool:
    # Fire-and-forget: errors from the async generator are only traced,
    # not reported back to the RPC caller.
    traceAsyncErrors node.generateTraffic(amount)
    return true

  rpcsrv.rpc("wakusim_generateRandomTraffic") do(amount: int) -> bool:
    traceAsyncErrors node.generateRandomTraffic(amount)
    return true

View File

@ -3,8 +3,9 @@ import
chronicles/topics_registry, # TODO: What? Need this for setLoglevel, weird.
eth/[keys, p2p, async_utils], eth/common/utils, eth/net/nat,
eth/p2p/[discovery, enode, peer_pool, bootnodes, whispernodes],
eth/p2p/rlpx_protocols/[whisper_protocol, waku_protocol, waku_bridge],
../../vendor/nimbus/nimbus/rpc/[waku, wakusim, key_storage]
eth/p2p/rlpx_protocols/whisper_protocol,
../../protocol/v1/[waku_protocol, waku_bridge],
./rpc/[waku, wakusim, key_storage]
const clientId = "Nimbus waku node"

View File

@ -1,6 +1,6 @@
import
confutils/defs, chronicles, chronos,
eth/keys, eth/p2p/rlpx_protocols/waku_protocol
eth/keys
type
Fleet* = enum

View File

@ -2,8 +2,8 @@ import
os, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
eth/common as eth_common, eth/keys,
# XXX: Replace me
eth/p2p/rlpx_protocols/waku_protocol,
../../vendor/nimbus/nimbus/rpc/[hexstrings, rpc_types, waku],
../../protocol/v1/waku_protocol,
../v1/rpc/[hexstrings, rpc_types, waku],
rpc/wakurpc,
options as what # TODO: Huh? Redefinition?
@ -14,7 +14,7 @@ template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = &"{sourceDir}{DirSep}rpc{DirSep}wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
# More minimal than v0 quicksim, just RPC client for now
# More minimal than v1 quicksim, just RPC client for now
let node1 = newRpcHttpClient()
#let node2 = newRpcHttpClient()

View File

@ -1,7 +1,7 @@
# NOTE: Taken from v0, only version exists right now
# NOTE: Taken from v1, only version exists right now
proc waku_version(): string
proc waku_info(): WhisperInfo
proc waku_info(): WakuInfo
proc waku_setMaxMessageSize(size: uint64): bool
proc waku_setMinPoW(pow: float): bool
proc waku_markTrustedPeer(enode: string): bool
@ -20,10 +20,10 @@ proc waku_hasSymKey(id: Identifier): bool
proc waku_getSymKey(id: Identifier): SymKey
proc waku_deleteSymKey(id: Identifier): bool
proc waku_newMessageFilter(options: WhisperFilterOptions): Identifier
proc waku_newMessageFilter(options: WakuFilterOptions): Identifier
proc waku_deleteMessageFilter(id: Identifier): bool
proc waku_getFilterMessages(id: Identifier): seq[WhisperFilterMessage]
proc waku_post(message: WhisperPostMessage): bool
proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage]
proc waku_post(message: WakuPostMessage): bool
proc wakusim_generateTraffic(amount: int): bool
proc wakusim_generateRandomTraffic(amount: int): bool

View File

@ -2,12 +2,10 @@ import
json_rpc/rpcserver, tables, options,
eth/[common, rlp, keys, p2p],
#DevP2P impl
#eth/p2p/rlpx_protocols/waku_protocol,
# ../../../protocol/v1/waku_protocol,
../../../protocol/v2/waku_protocol,
nimcrypto/[sysrand, hmac, sha2, pbkdf2],
../../../vendor/nimbus/nimbus/rpc/rpc_types,
../../../vendor/nimbus/nimbus/rpc/hexstrings,
../../../vendor/nimbus/nimbus/rpc/key_storage
../../v1/rpc/[rpc_types, hexstrings, key_storage]
from stew/byteutils import hexToSeqByte, hexToByteArray

View File

@ -5,7 +5,7 @@ import
eth/p2p/[discovery, enode, peer_pool, bootnodes, whispernodes],
eth/p2p/rlpx_protocols/[whisper_protocol, waku_protocol, waku_bridge],
# TODO remove me
../../vendor/nimbus/nimbus/rpc/[wakusim, key_storage],
../v1/rpc/[wakusim, key_storage],
../../vendor/nim-libp2p/libp2p/standard_setup,
../../vendor/nim-libp2p/libp2p/multiaddress,
../../vendor/nim-libp2p/libp2p/crypto/crypto,

View File

@ -0,0 +1,17 @@
#
# Waku - Whisper Bridge
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
import
eth/p2p,
eth/p2p/rlpx_protocols/whisper_protocol,
./waku_protocol
proc shareMessageQueue*(node: EthereumNode) =
  ## Bridge Waku and Whisper by making both protocol states point at the same
  ## envelope queue, so envelopes accepted via one protocol are visible to the
  ## other.
  ## NOTE(review): assumes both the Waku and Whisper capabilities are enabled
  ## on `node` — confirm at the call site.
  node.protocolState(Waku).queue = node.protocolState(Whisper).queue

View File

@ -0,0 +1,86 @@
#
# Waku Mail Client & Server
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
import
chronos,
eth/[p2p, async_utils],
./waku_protocol
const
  # Maximum time to wait for the mail server's p2pRequestComplete packet.
  requestCompleteTimeout = chronos.seconds(5)

type
  Cursor = seq[byte] # opaque paging cursor handed back by a mail server

  MailRequest* = object
    ## Parameters of a historic-envelope request sent to a Waku mail server.
    lower*: uint32 ## Unix timestamp; oldest requested envelope's creation time
    upper*: uint32 ## Unix timestamp; newest requested envelope's creation time
    bloom*: seq[byte] ## Bloom filter to apply on the envelopes
    limit*: uint32 ## Maximum amount of envelopes to return
    cursor*: Cursor ## Optional cursor
proc requestMail*(node: EthereumNode, peerId: NodeId, request: MailRequest,
    symKey: SymKey, requests = 10): Future[Option[Cursor]] {.async.} =
  ## Send a p2p mail request and wait for the request-complete packet.
  ## If the result is none, an error occurred. If the result is a non-empty
  ## cursor, more envelopes are available (the proc recurses up to `requests`
  ## times, following the cursor).
  # TODO: Perhaps don't go the recursive route or could use the actual response
  # proc to implement this (via a handler) and store the necessary data in the
  # WakuPeer object.
  # TODO: Several requestMail calls in parallel can create issues with handling
  # the wrong response to a request. Can additionally check the requestId but
  # that would only solve it half. Better to use the requestResponse mechanism.

  # TODO: move this check out of requestMail?
  # The peer must be known and explicitly trusted before requesting mail.
  let peer = node.getPeer(peerId, Waku)
  if not peer.isSome():
    error "Invalid peer"
    return result
  elif not peer.get().state(Waku).trusted:
    return result

  # RLP-encode the request and wrap it in an encrypted (symKey) payload.
  var writer = initRlpWriter()
  writer.append(request)
  let payload = writer.finish()
  let data = encode(Payload(payload: payload, symKey: some(symKey)))
  if not data.isSome():
    error "Encoding of payload failed"
    return result

  # TODO: should this envelope be valid in terms of ttl, PoW, etc.?
  let env = Envelope(expiry:0, ttl: 0, data: data.get(), nonce: 0)
  # Send the request
  traceAsyncErrors peer.get().p2pRequest(env)

  # Wait for the Request Complete packet
  var f = peer.get().nextMsg(Waku.p2pRequestComplete)
  if await f.withTimeout(requestCompleteTimeout):
    let response = f.read()
    # TODO: I guess the idea is to check requestId (Hash) also?
    let requests = requests - 1  # shadows the parameter: remaining retries
    # If there is cursor data, do another request
    if response.cursor.len > 0 and requests > 0:
      var newRequest = request
      newRequest.cursor = response.cursor
      return await requestMail(node, peerId, newRequest, symKey, requests)
    else:
      return some(response.cursor)
  else:
    error "p2pRequestComplete timeout"
    return result
proc p2pRequestHandler(peer: Peer, envelope: Envelope) =
  # Mail server p2p request implementation
  # NOTE(review): intentionally a stub — no server-side handling exists yet.
  discard
proc enableMailServer*(node: EthereumNode, customHandler: P2PRequestHandler) =
  ## Enable mail-server behaviour using a caller-supplied handler for
  ## incoming Waku p2p request envelopes.
  node.protocolState(Waku).p2pRequestHandler = customHandler
proc enableMailServer*(node: EthereumNode) =
  ## Enable mail-server behaviour with the default handler (currently a
  ## no-op stub).
  node.protocolState(Waku).p2pRequestHandler = p2pRequestHandler

View File

@ -0,0 +1,649 @@
#
# Waku
# (c) Copyright 2018-2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
## Waku
## *******
##
## Waku is a fork of Whisper.
##
## Waku is a gossip protocol that synchronizes a set of messages across nodes
## with attention given to sender and recipient anonymity. Messages are
## categorized by a topic and stay alive in the network based on a time-to-live
## measured in seconds. Spam prevention is based on proof-of-work, where large
## or long-lived messages must spend more work.
##
## Implementation should be according to Waku specification defined here:
## https://github.com/vacp2p/specs/blob/master/waku/waku.md
##
## Example usage
## ----------
## First an `EthereumNode` needs to be created, either with all capabilities set
## or with specifically the Waku capability set.
## The latter can be done like this:
##
## .. code-block::nim
## var node = newEthereumNode(keypair, address, netId, nil,
## addAllCapabilities = false)
## node.addCapability Waku
##
## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done.
## However, they only make real sense after ``connectToNetwork`` was started. As
## else there will be no peers to send and receive messages from.
import
options, tables, times, chronos, chronicles, metrics,
eth/[keys, async_utils, p2p], eth/p2p/rlpx_protocols/whisper/whisper_types,
eth/trie/trie_defs
export
whisper_types
logScope:
topics = "waku"
declarePublicCounter dropped_low_pow_envelopes,
"Dropped envelopes because of too low PoW"
declarePublicCounter dropped_too_large_envelopes,
"Dropped envelopes because larger than maximum allowed size"
declarePublicCounter dropped_bloom_filter_mismatch_envelopes,
"Dropped envelopes because not matching with bloom filter"
declarePublicCounter dropped_topic_mismatch_envelopes,
"Dropped envelopes because of not matching topics"
declarePublicCounter dropped_duplicate_envelopes,
"Dropped duplicate envelopes"
const
  defaultQueueCapacity = 2048 # max envelopes kept in the shared message queue
  wakuVersion* = 1 ## Waku version.
  wakuVersionStr* = $wakuVersion ## Waku version.
  defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node.
  defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max
  ## message size. This can never be larger than the maximum RLPx message size.
  messageInterval* = chronos.milliseconds(300) ## Interval at which messages are
  ## sent to peers, in ms.
  pruneInterval* = chronos.milliseconds(1000) ## Interval at which message
  ## queue is pruned, in ms.
  topicInterestMax = 10000 # max entries accepted in a peer's topic-interest
type
  WakuConfig* = object
    ## Local node configuration; communicated to peers at handshake and via
    ## `statusOptions` updates.
    powRequirement*: float64        # minimum accepted PoW for envelopes
    bloom*: Option[Bloom]           # bloom filter over accepted topics
    isLightNode*: bool              # light nodes do not relay the queue
    maxMsgSize*: uint32             # max accepted envelope size (<= RLPx max)
    confirmationsEnabled*: bool     # not yet implemented
    rateLimits*: Option[RateLimits] # not yet implemented
    topics*: Option[seq[Topic]]     # topic-interest; overrides bloom if set

  WakuPeer = ref object
    ## Per-peer protocol state, filled in from the peer's StatusOptions.
    initialized: bool # when successfully completed the handshake
    powRequirement*: float64
    bloom*: Bloom
    isLightNode*: bool
    trusted*: bool # trusted peers may send p2p (direct) messages
    topics*: Option[seq[Topic]]
    received: HashSet[Hash] # hashes already exchanged with this peer

  P2PRequestHandler* = proc(peer: Peer, envelope: Envelope) {.gcsafe.}

  WakuNetwork = ref object
    ## Node-wide protocol state shared by all Waku peers.
    queue*: ref Queue
    filters*: Filters
    config*: WakuConfig
    p2pRequestHandler*: P2PRequestHandler

  RateLimits* = object
    # TODO: uint or specifically uint32?
    limitIp*: uint
    limitPeerId*: uint
    limitTopic*: uint

  StatusOptions* = object
    ## Key-value options exchanged at handshake and in `statusOptions`
    ## packets; every field is optional on the wire.
    powRequirement*: Option[(float64)]
    bloomFilter*: Option[Bloom]
    lightNode*: Option[bool]
    confirmationsEnabled*: Option[bool]
    rateLimits*: Option[RateLimits]
    topicInterest*: Option[seq[Topic]]

  KeyKind* = enum
    ## Wire keys for the StatusOptions key-value list.
    powRequirementKey,
    bloomFilterKey,
    lightNodeKey,
    confirmationsEnabledKey,
    rateLimitsKey,
    topicInterestKey
template countSomeFields*(x: StatusOptions): int =
  # Number of fields of `x` that are some(...); used to size the RLP list
  # when serializing StatusOptions (see `append` below).
  var count = 0
  for f in fields(x):
    if f.isSome():
      inc count
  count
proc append*(rlpWriter: var RlpWriter, value: StatusOptions) =
  ## RLP-encode `value` as a list of (key, value) pairs, emitting only the
  ## fields that are set. The float64 PoW is serialized via its uint64 bit
  ## pattern (mirrored by the cast in `read`).
  var list = initRlpList(countSomeFields(value))
  if value.powRequirement.isSome():
    list.append((powRequirementKey, cast[uint64](value.powRequirement.get())))
  if value.bloomFilter.isSome():
    list.append((bloomFilterKey, @(value.bloomFilter.get())))
  if value.lightNode.isSome():
    list.append((lightNodeKey, value.lightNode.get()))
  if value.confirmationsEnabled.isSome():
    list.append((confirmationsEnabledKey, value.confirmationsEnabled.get()))
  if value.rateLimits.isSome():
    list.append((rateLimitsKey, value.rateLimits.get()))
  if value.topicInterest.isSome():
    list.append((topicInterestKey, value.topicInterest.get()))
  let bytes = list.finish()
  rlpWriter.append(rlpFromBytes(bytes))
proc read*(rlp: var Rlp, T: typedesc[StatusOptions]): T =
  ## Decode a StatusOptions key-value list. Unknown keys are skipped (forward
  ## compatibility); a malformed bloom filter raises UselessPeerError.
  if not rlp.isList():
    raise newException(RlpTypeMismatch,
      "List expected, but the source RLP is not a list.")

  let sz = rlp.listLen()
  # We already know that we are working with a list
  doAssert rlp.enterList()
  for i in 0 ..< sz:
    rlp.tryEnterList()

    var k: KeyKind
    try:
      k = rlp.read(KeyKind)
    except RlpTypeMismatch:
      # skip unknown keys and their value
      rlp.skipElem()
      rlp.skipElem()
      continue

    case k
    of powRequirementKey:
      # PoW travels as the raw uint64 bit pattern of the float64 (see append).
      let pow = rlp.read(uint64)
      result.powRequirement = some(cast[float64](pow))
    of bloomFilterKey:
      let bloom = rlp.read(seq[byte])
      if bloom.len != bloomSize:
        raise newException(UselessPeerError, "Bloomfilter size mismatch")
      var bloomFilter: Bloom
      bloomFilter.bytesCopy(bloom)
      result.bloomFilter = some(bloomFilter)
    of lightNodeKey:
      result.lightNode = some(rlp.read(bool))
    of confirmationsEnabledKey:
      result.confirmationsEnabled = some(rlp.read(bool))
    of rateLimitsKey:
      result.rateLimits = some(rlp.read(RateLimits))
    of topicInterestKey:
      result.topicInterest = some(rlp.read(seq[Topic]))
proc allowed*(msg: Message, config: WakuConfig): bool =
  ## Decide whether `msg` is acceptable under this node's configuration:
  ## size limit, PoW requirement, and either the topic-interest list (when
  ## set) or the bloom filter. Rejections bump the matching metrics counter.
  # Check max msg size, already happens in RLPx but there is a specific waku
  # max msg size which should always be < RLPx max msg size
  result = false
  if msg.size > config.maxMsgSize:
    dropped_too_large_envelopes.inc()
    warn "Message size too large", size = msg.size
  elif msg.pow < config.powRequirement:
    dropped_low_pow_envelopes.inc()
    warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement
  elif config.topics.isSome():
    # Topic-interest takes precedence over the bloom filter.
    if msg.env.topic notin config.topics.get():
      dropped_topic_mismatch_envelopes.inc()
      warn "Message topic does not match Waku topic list"
    else:
      result = true
  elif config.bloom.isSome() and not bloomFilterMatch(config.bloom.get(), msg.bloom):
    dropped_bloom_filter_mismatch_envelopes.inc()
    warn "Message does not match node bloom filter"
  else:
    result = true
proc run(peer: Peer) {.gcsafe, async.}
proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.}
proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} =
  ## Initialize the shared Waku network state with its defaults (full bloom,
  ## default PoW, full node) and kick off the node-level maintenance loop.
  new(network.queue)
  network.queue[] = initQueue(defaultQueueCapacity)
  network.filters = initTable[string, Filter]()
  network.config.bloom = some(fullBloom())
  network.config.powRequirement = defaultMinPow
  network.config.isLightNode = false
  # RateLimits and confirmations are not yet implemented so we set confirmations
  # to false and we don't pass RateLimits at all.
  network.config.confirmationsEnabled = false
  network.config.rateLimits = none(RateLimits)
  network.config.maxMsgSize = defaultMaxMsgSize
  network.config.topics = none(seq[Topic])
  # Start the prune loop; errors surface via asyncCheck.
  asyncCheck node.run(network)
# Protocol DSL block: packet IDs are laid out explicitly with `nextID` to
# match the Waku v1 wire specification — do not reorder the handlers.
p2pProtocol Waku(version = wakuVersion,
                 rlpxName = "waku",
                 peerState = WakuPeer,
                 networkState = WakuNetwork):

  onPeerConnected do (peer: Peer):
    trace "onPeerConnected Waku"
    let
      wakuNet = peer.networkState
      wakuPeer = peer.state

    # Exchange status handshake carrying this node's configuration.
    let options = StatusOptions(
      powRequirement: some(wakuNet.config.powRequirement),
      bloomFilter: wakuNet.config.bloom,
      lightNode: some(wakuNet.config.isLightNode),
      confirmationsEnabled: some(wakuNet.config.confirmationsEnabled),
      rateLimits: wakuNet.config.rateLimits,
      topicInterest: wakuNet.config.topics)

    let m = await peer.status(options,
      timeout = chronos.milliseconds(5000))

    # Absent options fall back to protocol defaults.
    wakuPeer.powRequirement = m.options.powRequirement.get(defaultMinPow)
    wakuPeer.bloom = m.options.bloomFilter.get(fullBloom())

    wakuPeer.isLightNode = m.options.lightNode.get(false)
    if wakuPeer.isLightNode and wakuNet.config.isLightNode:
      # No sense in connecting two light nodes so we disconnect
      raise newException(UselessPeerError, "Two light nodes connected")

    wakuPeer.topics = m.options.topicInterest
    if wakuPeer.topics.isSome():
      if wakuPeer.topics.get().len > topicInterestMax:
        raise newException(UselessPeerError, "Topic-interest is too large")
      if wakuNet.config.topics.isSome():
        raise newException(UselessPeerError,
          "Two Waku nodes with topic-interest connected")

    wakuPeer.received.init()
    wakuPeer.trusted = false
    wakuPeer.initialized = true

    # No timer based queue processing for a light node.
    if not wakuNet.config.isLightNode:
      traceAsyncErrors peer.run()

    debug "Waku peer initialized", peer

  handshake:
    proc status(peer: Peer, options: StatusOptions)

  proc messages(peer: Peer, envelopes: openarray[Envelope]) =
    ## Gossip packet: validate, deduplicate and queue incoming envelopes.
    if not peer.state.initialized:
      warn "Handshake not completed yet, discarding messages"
      return

    for envelope in envelopes:
      # check if expired or in future, or ttl not 0
      if not envelope.valid():
        warn "Expired or future timed envelope", peer
        # disconnect from peers sending bad envelopes
        # await peer.disconnect(SubprotocolReason)
        continue

      let msg = initMessage(envelope)
      if not msg.allowed(peer.networkState.config):
        # disconnect from peers sending bad envelopes
        # await peer.disconnect(SubprotocolReason)
        continue

      # This peer sent this message thus should not receive it again.
      # If this peer has the message in the `received` set already, this means
      # it was either already received here from this peer or sent to this peer.
      # Either way it will be in our queue already (and the peer should know
      # this) and this peer is sending duplicates.
      # Note: geth does not check if a peer has sent a message to them before
      # broadcasting this message. This too is seen here as a duplicate message
      # (see above comment). If we want to separate these cases (e.g. when peer
      # rating), then we have to add a "peer.state.send" HashSet.
      # Note: it could also be a race between the arrival of a message sent by
      # this node to a peer and that same message arriving from that peer (after
      # it was received from another peer) here.
      if peer.state.received.containsOrIncl(msg.hash):
        dropped_duplicate_envelopes.inc()
        trace "Peer sending duplicate messages", peer, hash = $msg.hash
        # await peer.disconnect(SubprotocolReason)
        continue

      # This can still be a duplicate message, but from another peer than
      # the peer who sent the message.
      if peer.networkState.queue[].add(msg):
        # notify filters of this message
        peer.networkState.filters.notify(msg)

  nextID 22

  proc statusOptions(peer: Peer, options: StatusOptions) =
    ## Post-handshake configuration update from the peer; only present
    ## options overwrite existing per-peer state.
    if not peer.state.initialized:
      warn "Handshake not completed yet, discarding statusOptions"
      return

    if options.topicInterest.isSome():
      peer.state.topics = options.topicInterest
    elif options.bloomFilter.isSome():
      # A bloom filter update supersedes any earlier topic-interest.
      peer.state.bloom = options.bloomFilter.get()
      peer.state.topics = none(seq[Topic])

    if options.powRequirement.isSome():
      peer.state.powRequirement = options.powRequirement.get()

    if options.lightNode.isSome():
      peer.state.isLightNode = options.lightNode.get()

  nextID 126

  proc p2pRequest(peer: Peer, envelope: Envelope) =
    ## Mail-server request; only handled when a handler was installed via
    ## `enableMailServer`.
    if not peer.networkState.p2pRequestHandler.isNil():
      peer.networkState.p2pRequestHandler(peer, envelope)

  proc p2pMessage(peer: Peer, envelopes: openarray[Envelope]) =
    ## Direct (expired/low-PoW allowed) messages; accepted only from peers
    ## marked trusted via `setPeerTrusted`.
    if peer.state.trusted:
      # when trusted we can bypass any checks on envelope
      for envelope in envelopes:
        let msg = Message(env: envelope, isP2P: true)
        peer.networkState.filters.notify(msg)

  # Following message IDs are not part of EIP-627, but are added and used by
  # the Status application, we ignore them for now.
  nextID 11
  proc batchAcknowledged(peer: Peer) = discard
  proc messageResponse(peer: Peer) = discard

  nextID 123
  requestResponse:
    proc p2pSyncRequest(peer: Peer) = discard
    proc p2pSyncResponse(peer: Peer) = discard

  proc p2pRequestComplete(peer: Peer, requestId: Hash, lastEnvelopeHash: Hash,
      cursor: seq[byte]) = discard
    # TODO:
    # In the current specification the parameters are not wrapped in a regular
    # envelope as is done for the P2P Request packet. If we could alter this in
    # the spec it would be a cleaner separation between Waku and Mail server /
    # client.
    # Also, if a requestResponse block is used, a requestId will automatically
    # be added by the protocol DSL.
    # However the requestResponse block in combination with p2pRequest cannot be
    # used due to the unfortunate fact that the packet IDs are not consecutive,
    # and nextID is not recognized in between these. The nextID behaviour could
    # be fixed, however it would be cleaner if the specification could be
    # changed to have these IDs to be consecutive.
# 'Runner' calls ---------------------------------------------------------------
proc processQueue(peer: Peer) =
  # Send to peer all valid and previously not sent envelopes in the queue,
  # honouring the peer's PoW requirement and topic-interest/bloom filter.
  var
    envelopes: seq[Envelope] = @[]
    wakuPeer = peer.state(Waku)
    wakuNet = peer.networkState(Waku)

  for message in wakuNet.queue.items:
    if wakuPeer.received.contains(message.hash):
      # trace "message was already sent to peer", hash = $message.hash, peer
      continue

    if message.pow < wakuPeer.powRequirement:
      trace "Message PoW too low for peer", pow = message.pow,
        powReq = wakuPeer.powRequirement
      continue

    # Topic-interest takes precedence over the bloom filter.
    if wakuPeer.topics.isSome():
      if message.env.topic notin wakuPeer.topics.get():
        trace "Message does not match topics list"
        continue
    else:
      if not bloomFilterMatch(wakuPeer.bloom, message.bloom):
        trace "Message does not match peer bloom filter"
        continue

    trace "Adding envelope"
    envelopes.add(message.env)
    # Record the hash so this envelope is never resent to this peer.
    wakuPeer.received.incl(message.hash)

  if envelopes.len() > 0:
    trace "Sending envelopes", amount=envelopes.len
    # Ignore failure of sending messages, this could occur when the connection
    # gets dropped
    traceAsyncErrors peer.messages(envelopes)
proc run(peer: Peer) {.async.} =
  ## Per-peer send loop: flush matching queued envelopes to `peer` every
  ## `messageInterval` until the connection goes down.
  while peer.connectionState notin {Disconnecting, Disconnected}:
    peer.processQueue()
    await sleepAsync(messageInterval)
proc pruneReceived(node: EthereumNode) {.raises: [].} =
  ## Shrink every initialized peer's `received` set to the hashes still in
  ## the (already pruned) message queue, so the sets don't grow forever.
  if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ...
    var wakuNet = node.protocolState(Waku)

    for peer in node.protocolPeers(Waku):
      if not peer.initialized:
        continue

      # NOTE: Perhaps alter the queue prune call to keep track of a HashSet
      # of pruned messages (as these should be smaller), and diff this with
      # the received sets.
      peer.received = intersection(peer.received, wakuNet.queue.itemHashes)
proc run(node: EthereumNode, network: WakuNetwork) {.async.} =
  ## Node-level maintenance loop started by `initProtocolState`: prunes
  ## expired envelopes and per-peer received-sets every `pruneInterval`.
  while true:
    # prune message queue every second
    # TTL unit is in seconds, so this should be sufficient?
    network.queue[].prune()
    # pruning the received sets is not necessary for correct workings
    # but simply from keeping the sets growing indefinitely
    node.pruneReceived()
    await sleepAsync(pruneInterval)
# Private EthereumNode calls ---------------------------------------------------
proc sendP2PMessage(node: EthereumNode, peerId: NodeId,
    envelopes: openarray[Envelope]): bool =
  ## Fire-and-forget direct delivery of `envelopes` to the connected peer
  ## identified by `peerId`. Returns true iff such a peer was found.
  result = false
  for connectedPeer in node.peers(Waku):
    if connectedPeer.remote.id == peerId:
      # Delivery errors are only traced by asyncCheck, not propagated.
      asyncCheck connectedPeer.p2pMessage(envelopes)
      return true
proc queueMessage(node: EthereumNode, msg: Message): bool =
  ## Add `msg` to the shared queue and notify local filters; returns false
  ## when the message fails the same admission checks applied to peers.
  var wakuNet = node.protocolState(Waku)
  # We have to do the same checks here as in the messages proc not to leak
  # any information that the message originates from this node.
  if not msg.allowed(wakuNet.config):
    return false

  trace "Adding message to queue", hash = $msg.hash
  if wakuNet.queue[].add(msg):
    # Also notify our own filters of the message we are sending,
    # e.g. msg from local Dapp to Dapp
    wakuNet.filters.notify(msg)

  return true
# Public EthereumNode calls ----------------------------------------------------
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
                  symKey = none[SymKey](), src = none[PrivateKey](),
                  ttl: uint32, topic: Topic, payload: seq[byte],
                  padding = none[seq[byte]](), powTime = 1'f,
                  powTarget = defaultMinPow,
                  targetPeer = none[NodeId]()): bool =
  ## Post a message on the message queue which will be processed at the
  ## next `messageInterval`. Returns false on encoding failure, zero ttl for
  ## broadcast, or failed/expired PoW sealing.
  ##
  ## NOTE: This call allows a post without encryption. If encryption is
  ## mandatory it should be enforced a layer up
  let payload = encode(Payload(payload: payload, src: src, dst: pubKey,
    symKey: symKey, padding: padding))  # shadows the raw `payload` parameter
  if payload.isSome():
    var env = Envelope(expiry:epochTime().uint32 + ttl,
      ttl: ttl, topic: topic, data: payload.get(), nonce: 0)

    # Allow lightnode to post only direct p2p messages
    if targetPeer.isSome():
      return node.sendP2PMessage(targetPeer.get(), [env])
    else:
      # non direct p2p message can not have ttl of 0
      if env.ttl == 0:
        return false
      var msg = initMessage(env, powCalc = false)
      # XXX: make this non blocking or not?
      # In its current blocking state, it could be noticed by a peer that no
      # messages are sent for a while, and thus that mining PoW is done, and
      # that next messages contains a message originated from this peer
      # zah: It would be hard to execute this in a background thread at the
      # moment. We'll need a way to send custom "tasks" to the async message
      # loop (e.g. AD2 support for AsyncChannels).
      if not msg.sealEnvelope(powTime, powTarget):
        return false

      # need to check expiry after mining PoW
      if not msg.env.valid():
        return false

      result = node.queueMessage(msg)

      # Allows light nodes to post via untrusted messages packet.
      # Queue gets processed immediately as the node sends only its own messages,
      # so the privacy ship has already sailed anyhow.
      # TODO:
      # - Could be still a concern in terms of efficiency, if multiple messages
      # need to be sent.
      # - For Waku Mode, the checks in processQueue are rather useless as the
      # idea is to connect only to 1 node? Also refactor in that case.
      if node.protocolState(Waku).config.isLightNode:
        for peer in node.peers(Waku):
          peer.processQueue()
  else:
    error "Encoding of payload failed"
    return false
proc subscribeFilter*(node: EthereumNode, filter: Filter,
                      handler: FilterMsgHandler = nil): string =
  ## Initiate a filter for incoming/outgoing messages. Messages can be
  ## retrieved with the `getFilterMessages` call or with a provided
  ## `FilterMsgHandler`. Returns the filter id for later retrieval/removal.
  ##
  ## NOTE: This call allows for a filter without decryption. If encryption is
  ## mandatory it should be enforced a layer up.
  return node.protocolState(Waku).filters.subscribeFilter(filter, handler)
proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool =
  ## Remove a previously subscribed filter; returns false when no filter
  ## with `filterId` exists.
  var removed: Filter
  node.protocolState(Waku).filters.take(filterId, removed)
proc getFilterMessages*(node: EthereumNode, filterId: string): seq[ReceivedMessage] =
  ## Get all the messages currently in the filter queue. This will reset the
  ## filter message queue.
  return node.protocolState(Waku).filters.getFilterMessages(filterId)
proc filtersToBloom*(node: EthereumNode): Bloom =
  ## Returns the bloom filter of all topics of all subscribed filters.
  return node.protocolState(Waku).filters.toBloom()
proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} =
  ## Set this node's minimum PoW requirement and broadcast the new value to
  ## every connected peer.
  ##
  ## Failures when sending messages to peers will not be reported.
  # NOTE: do we need a tolerance of old PoW for some time?
  node.protocolState(Waku).config.powRequirement = powReq
  let update = StatusOptions(powRequirement: some(powReq))
  var sends: seq[Future[void]]
  for connectedPeer in node.peers(Waku):
    sends.add(connectedPeer.statusOptions(update))
  # Exceptions from sendMsg will not be raised
  await allFutures(sends)
proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
  ## Install a new bloom filter for this node (clearing any topic-interest)
  ## and broadcast it to every connected peer.
  ##
  ## Failures when sending messages to peers will not be reported.
  # NOTE: do we need a tolerance of old bloom filter for some time?
  let wakuNet = node.protocolState(Waku)
  wakuNet.config.bloom = some(bloom)
  # A bloom filter supersedes topic-interest, so reset topics.
  wakuNet.config.topics = none(seq[Topic])
  let update = StatusOptions(bloomFilter: some(bloom))
  var sends: seq[Future[void]]
  for connectedPeer in node.peers(Waku):
    sends.add(connectedPeer.statusOptions(update))
  # Exceptions from sendMsg will not be raised
  await allFutures(sends)
proc setTopicInterest*(node: EthereumNode, topics: seq[Topic]):
    Future[bool] {.async.} =
  ## Replace this node's topic-interest list and announce it to all connected
  ## peers. Returns false (changing nothing) when the list exceeds
  ## `topicInterestMax`; per-peer send failures are not reported.
  if topics.len > topicInterestMax:
    return false

  node.protocolState(Waku).config.topics = some(topics)
  let update = StatusOptions(topicInterest: some(topics))
  var sends: seq[Future[void]]
  for connectedPeer in node.peers(Waku):
    sends.add(connectedPeer.statusOptions(update))
  # Exceptions from sendMsg will not be raised
  await allFutures(sends)
  return true
proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool =
  ## Set the maximum allowed message size; values above
  ## ``defaultMaxMsgSize`` are rejected.
  if size > defaultMaxMsgSize:
    warn "size > defaultMaxMsgSize"
    return false
  node.protocolState(Waku).config.maxMsgSize = size
  true
proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool =
  ## Mark the connected peer identified by `peerId` as trusted (allowed to
  ## send direct p2p messages). Returns false when the peer is not connected.
  result = false
  for connectedPeer in node.peers(Waku):
    if connectedPeer.remote.id == peerId:
      connectedPeer.state(Waku).trusted = true
      return true
proc setLightNode*(node: EthereumNode, isLightNode: bool) {.async.} =
  ## Toggle light-node mode for this node and announce the change to all
  ## connected peers. Per-peer send failures are not reported.
  node.protocolState(Waku).config.isLightNode = isLightNode
  # TODO: Add starting/stopping of `processQueue` loop depending on value of isLightNode.
  let update = StatusOptions(lightNode: some(isLightNode))
  var sends: seq[Future[void]]
  for connectedPeer in node.peers(Waku):
    sends.add(connectedPeer.statusOptions(update))
  # Exceptions from sendMsg will not be raised
  await allFutures(sends)
proc configureWaku*(node: EthereumNode, config: WakuConfig) =
  ## Apply a Waku configuration.
  ##
  ## NOTE: Should be run before connection is made with peers as some
  ## of the settings are only communicated at peer handshake.
  node.protocolState(Waku).config = config
proc resetMessageQueue*(node: EthereumNode) =
  ## Full reset of the message queue (drops all queued envelopes).
  ##
  ## NOTE: Not something that should be run in normal circumstances.
  node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity)