From ece1de7ab9b81af6394c922c64404ba977c421c1 Mon Sep 17 00:00:00 2001 From: Hanno Cornelius Date: Fri, 2 Jun 2023 18:22:49 +0200 Subject: [PATCH] chore: add and adapt legacy files from nwaku repo --- .editorconfig | 18 + .gitignore | 57 +++ .gitmodules | 105 +++++ LICENSE-APACHEv2 | 205 +++++++++ LICENSE => LICENSE-MIT | 0 Makefile | 132 ++++++ config.nims | 78 ++++ env.sh | 8 + tests/all_tests_waku.nim | 10 + tests/all_tests_whisper.nim | 5 + tests/nim.cfg | 1 + tests/test_helpers.nim | 45 ++ tests/waku/test_rpc_waku.nim | 237 ++++++++++ tests/waku/test_waku_bridge.nim | 98 ++++ tests/waku/test_waku_config.nim | 65 +++ tests/waku/test_waku_connect.nim | 560 +++++++++++++++++++++++ tests/waku/test_waku_mail.nim | 119 +++++ tests/whisper/test_shh.nim | 382 ++++++++++++++++ tests/whisper/test_shh_config.nim | 71 +++ tests/whisper/test_shh_connect.nim | 329 ++++++++++++++ vendor/nim-bearssl | 1 + vendor/nim-chronicles | 1 + vendor/nim-chronos | 1 + vendor/nim-confutils | 1 + vendor/nim-eth | 1 + vendor/nim-faststreams | 1 + vendor/nim-http-utils | 1 + vendor/nim-json-rpc | 1 + vendor/nim-json-serialization | 1 + vendor/nim-libbacktrace | 1 + vendor/nim-metrics | 1 + vendor/nim-nat-traversal | 1 + vendor/nim-secp256k1 | 1 + vendor/nim-serialization | 1 + vendor/nim-stew | 1 + vendor/nim-stint | 1 + vendor/nim-testutils | 1 + vendor/nim-unittest2 | 1 + vendor/nim-websock | 1 + vendor/nimbus-build-system | 1 + vendor/nimcrypto | 1 + waku.nimble | 74 +++ waku/common/utils/nat.nim | 67 +++ waku/node/README.md | 3 + waku/node/config.nim | 164 +++++++ waku/node/nim.cfg | 4 + waku/node/quicksim.nim | 76 ++++ waku/node/rpc/hexstrings.nim | 222 +++++++++ waku/node/rpc/key_storage.nim | 22 + waku/node/rpc/rpc_types.nim | 58 +++ waku/node/rpc/waku.nim | 365 +++++++++++++++ waku/node/rpc/wakucallsigs.nim | 27 ++ waku/node/rpc/wakusim.nim | 31 ++ waku/node/start_network.nim | 204 +++++++++ waku/node/waku_helpers.nim | 16 + waku/node/wakunode1.nim | 150 +++++++ waku/protocol/README.md | 3 + waku/protocol/waku_bridge.nim | 22 + waku/protocol/waku_mail.nim | 91 ++++ waku/protocol/waku_protocol.nim | 694 +++++++++++++++++++++++++++++ waku/whisper/whisper_protocol.nim | 481 ++++++++++++++++++++ waku/whisper/whisper_types.nim | 674 ++++++++++++++++++++++++++++ waku/whisper/whispernodes.nim | 67 +++ 63 files changed, 6061 insertions(+) create mode 100644 .editorconfig create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 LICENSE-APACHEv2 rename LICENSE => LICENSE-MIT (100%) create mode 100644 Makefile create mode 100644 config.nims create mode 100644 env.sh create mode 100644 tests/all_tests_waku.nim create mode 100644 tests/all_tests_whisper.nim create mode 100644 tests/nim.cfg create mode 100644 tests/test_helpers.nim create mode 100644 tests/waku/test_rpc_waku.nim create mode 100644 tests/waku/test_waku_bridge.nim create mode 100644 tests/waku/test_waku_config.nim create mode 100644 tests/waku/test_waku_connect.nim create mode 100644 tests/waku/test_waku_mail.nim create mode 100644 tests/whisper/test_shh.nim create mode 100644 tests/whisper/test_shh_config.nim create mode 100644 tests/whisper/test_shh_connect.nim create mode 160000 vendor/nim-bearssl create mode 160000 vendor/nim-chronicles create mode 160000 vendor/nim-chronos create mode 160000 vendor/nim-confutils create mode 160000 vendor/nim-eth create mode 160000 vendor/nim-faststreams create mode 160000 vendor/nim-http-utils create mode 160000 vendor/nim-json-rpc create mode 160000 vendor/nim-json-serialization create mode 
160000 vendor/nim-libbacktrace create mode 160000 vendor/nim-metrics create mode 160000 vendor/nim-nat-traversal create mode 160000 vendor/nim-secp256k1 create mode 160000 vendor/nim-serialization create mode 160000 vendor/nim-stew create mode 160000 vendor/nim-stint create mode 160000 vendor/nim-testutils create mode 160000 vendor/nim-unittest2 create mode 160000 vendor/nim-websock create mode 160000 vendor/nimbus-build-system create mode 160000 vendor/nimcrypto create mode 100644 waku.nimble create mode 100644 waku/common/utils/nat.nim create mode 100644 waku/node/README.md create mode 100644 waku/node/config.nim create mode 100644 waku/node/nim.cfg create mode 100644 waku/node/quicksim.nim create mode 100644 waku/node/rpc/hexstrings.nim create mode 100644 waku/node/rpc/key_storage.nim create mode 100644 waku/node/rpc/rpc_types.nim create mode 100644 waku/node/rpc/waku.nim create mode 100644 waku/node/rpc/wakucallsigs.nim create mode 100644 waku/node/rpc/wakusim.nim create mode 100644 waku/node/start_network.nim create mode 100644 waku/node/waku_helpers.nim create mode 100644 waku/node/wakunode1.nim create mode 100644 waku/protocol/README.md create mode 100644 waku/protocol/waku_bridge.nim create mode 100644 waku/protocol/waku_mail.nim create mode 100644 waku/protocol/waku_protocol.nim create mode 100644 waku/whisper/whisper_protocol.nim create mode 100644 waku/whisper/whisper_types.nim create mode 100644 waku/whisper/whispernodes.nim diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..e7f569e --- /dev/null +++ b/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +indent_style = space +indent_size = 2 +end_of_line = lf +trim_trailing_whitespace = true +insert_final_newline = true + + +[{Makefile,*.sh}] +indent_style = tab + +# Trailing spaces in markdown indicate word wrap +[{*.markdown,*.md}] +trim_trailing_whitespace = false +max_line_length = 80 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6e54595 --- /dev/null +++ b/.gitignore @@ -0,0 +1,57 @@ +/nimcache + +# Executables shall be put in an ignored build/ directory +/build + +# Nimble packages +/vendor/.nimble + +# Generated Files +*.generated.nim + +# ntags/ctags output +/tags + +# a symlink that can't be added to the repo because of Windows +/waku.nims + +# Ignore dynamic, static libs and libtool archive files +*.so +*.dylib +*.a +*.la +*.exe +*.dll + +.DS_Store + +# Ignore simulation generated metrics files +/metrics/prometheus +/metrics/waku-sim-all-nodes-grafana-dashboard.json + +*.log +package-lock.json +package.json +node_modules/ +/.update.timestamp + +# Ignore Jetbrains IDE files +.idea/ + +# RLN / keystore +rlnKeystore.json +*.tar.gz + +# Nimbus Build System +nimbus-build-system.paths + +# sqlite db +*.db +*.db-shm +*.db-wal +*.sqlite3 +*.sqlite3-shm +*.sqlite3-wal + +# Ignore autogenerated C-bindings compilation files +/examples/cbindings/libwaku.h diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..49099de --- /dev/null +++ b/.gitmodules @@ -0,0 +1,105 @@ +[submodule "vendor/nimbus-build-system"] + path = vendor/nimbus-build-system + url = https://github.com/status-im/nimbus-build-system.git + ignore = untracked + branch = master +[submodule "vendor/nim-eth"] + path = vendor/nim-eth + url = https://github.com/status-im/nim-eth.git + ignore = untracked + branch = master +[submodule "vendor/nim-stew"] + path = vendor/nim-stew + url = https://github.com/status-im/nim-stew.git + ignore = untracked + branch = master +[submodule 
"vendor/nim-libbacktrace"] + path = vendor/nim-libbacktrace + url = https://github.com/status-im/nim-libbacktrace.git + ignore = untracked + branch = master +[submodule "vendor/nim-confutils"] + path = vendor/nim-confutils + url = https://github.com/status-im/nim-confutils.git + ignore = untracked + branch = master +[submodule "vendor/nim-chronicles"] + path = vendor/nim-chronicles + url = https://github.com/status-im/nim-chronicles.git + ignore = untracked + branch = master +[submodule "vendor/nim-chronos"] + path = vendor/nim-chronos + url = https://github.com/status-im/nim-chronos.git + ignore = untracked + branch = master +[submodule "vendor/nim-metrics"] + path = vendor/nim-metrics + url = https://github.com/status-im/nim-metrics.git + ignore = untracked + branch = master +[submodule "vendor/nim-bearssl"] + path = vendor/nim-bearssl + url = https://github.com/status-im/nim-bearssl.git + ignore = untracked + branch = master +[submodule "vendor/nim-testutils"] + path = vendor/nim-testutils + url = https://github.com/status-im/nim-testutils.git + ignore = untracked + branch = master +[submodule "vendor/nim-unittest2"] + path = vendor/nim-unittest2 + url = https://github.com/status-im/nim-unittest2.git + ignore = untracked + branch = master +[submodule "vendor/nim-nat-traversal"] + path = vendor/nim-nat-traversal + url = https://github.com/status-im/nim-nat-traversal.git + ignore = untracked + branch = master +[submodule "vendor/nim-serialization"] + path = vendor/nim-serialization + url = https://github.com/status-im/nim-serialization.git + ignore = untracked + branch = master +[submodule "vendor/nimcrypto"] + path = vendor/nimcrypto + url = https://github.com/cheatfate/nimcrypto.git + ignore = untracked + branch = master +[submodule "vendor/nim-stint"] + path = vendor/nim-stint + url = https://github.com/status-im/nim-stint.git + ignore = untracked + branch = master +[submodule "vendor/nim-json-rpc"] + path = vendor/nim-json-rpc + url = https://github.com/status-im/nim-json-rpc.git + ignore = untracked + branch = master +[submodule "vendor/nim-http-utils"] + path = vendor/nim-http-utils + url = https://github.com/status-im/nim-http-utils.git + ignore = untracked + branch = master +[submodule "vendor/nim-faststreams"] + path = vendor/nim-faststreams + url = https://github.com/status-im/nim-faststreams.git + ignore = untracked + branch = master +[submodule "vendor/nim-json-serialization"] + path = vendor/nim-json-serialization + url = https://github.com/status-im/nim-json-serialization.git + ignore = untracked + branch = master +[submodule "vendor/nim-websock"] + path = vendor/nim-websock + url = https://github.com/status-im/nim-websock.git + ignore = untracked + branch = master +[submodule "vendor/nim-secp256k1"] + path = vendor/nim-secp256k1 + url = https://github.com/status-im/nim-secp256k1.git + ignore = untracked + branch = master diff --git a/LICENSE-APACHEv2 b/LICENSE-APACHEv2 new file mode 100644 index 0000000..7b6a3cb --- /dev/null +++ b/LICENSE-APACHEv2 @@ -0,0 +1,205 @@ +nim-waku is licensed under the Apache License version 2 +Copyright (c) 2018 Status Research & Development GmbH +----------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Status Research & Development GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/LICENSE b/LICENSE-MIT similarity index 100% rename from LICENSE rename to LICENSE-MIT diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..8e5a81d --- /dev/null +++ b/Makefile @@ -0,0 +1,132 @@ +# Copyright (c) 2022 Status Research & Development GmbH. Licensed under +# either of: +# - Apache License, version 2.0 +# - MIT license +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. +BUILD_SYSTEM_DIR := vendor/nimbus-build-system +LINK_PCRE := 0 +LOG_LEVEL := TRACE + +# we don't want an error here, so we can handle things later, in the ".DEFAULT" target +-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk + + +ifeq ($(NIM_PARAMS),) +# "variables.mk" was not included, so we update the submodules. +GIT_SUBMODULE_UPDATE := git submodule update --init --recursive +.DEFAULT: + +@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \ + $(GIT_SUBMODULE_UPDATE); \ + echo +# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself: +# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles +# +# After restarting, it will execute its original goal, so we don't have to start a child Make here +# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great? + +else # "variables.mk" was included. Business as usual until the end of this file. + + +########## +## Main ## +########## +.PHONY: all test update clean + +# default target, because it's the first one that doesn't start with '.' +all: | wakunode1 example1 sim1 + +test: | testwhisper testwaku1 + +waku.nims: + ln -s waku.nimble $@ + +update: | update-common + rm -rf waku.nims && \ + $(MAKE) waku.nims $(HANDLE_OUTPUT) + +clean: + rm -rf build + +# must be included after the default target +-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk + +## Possible values: prod; debug +TARGET ?= prod + +## Git version +GIT_VERSION ?= $(shell git describe --abbrev=6 --always --tags) +NIM_PARAMS := $(NIM_PARAMS) -d:git_version=\"$(GIT_VERSION)\" + +## Pass CPU architecture to C compiler, use basic x86-64 instruction set by default +ARCHITECTURE ?= "x86-64" +NIM_PARAMS := $(NIM_PARAMS) --passC:\"-march=$(ARCHITECTURE)\" + +endif + +################## +## Dependencies ## +################## +.PHONY: deps libbacktrace + +deps: | deps-common nat-libs waku.nims + + +### nim-libbacktrace + +# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims +ifeq ($(USE_LIBBACKTRACE), 0) +NIM_PARAMS := $(NIM_PARAMS) -d:debug -d:disable_libbacktrace +else +NIM_PARAMS := $(NIM_PARAMS) -d:release +endif + +libbacktrace: + + $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 + +clean-libbacktrace: + + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) + +# Extend deps and clean targets +ifneq ($(USE_LIBBACKTRACE), 0) +deps: | libbacktrace +endif + +clean: | clean-libbacktrace + + +################# +## Waku legacy ## +################# +.PHONY: testwhisper testwaku1 wakunode1 example1 sim1 + +testwhisper: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim testwhisper $(NIM_PARAMS) waku.nims + +testwaku1: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim test1 $(NIM_PARAMS) waku.nims + +wakunode1: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim wakunode1 $(NIM_PARAMS) waku.nims + +example1: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim example1 $(NIM_PARAMS) waku.nims + +sim1: | 
build deps wakunode1 + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim sim1 $(NIM_PARAMS) waku.nims + + +################### +## Documentation ## +################### +.PHONY: docs + +# TODO: Remove unused target +docs: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim doc --run --index:on --project --out:.gh-pages waku/waku.nim waku.nims diff --git a/config.nims b/config.nims new file mode 100644 index 0000000..2ca66c3 --- /dev/null +++ b/config.nims @@ -0,0 +1,78 @@ +if defined(release): + switch("nimcache", "nimcache/release/$projectName") +else: + switch("nimcache", "nimcache/debug/$projectName") + +if defined(windows): + # disable timestamps in Windows PE headers - https://wiki.debian.org/ReproducibleBuilds/TimestampsInPEBinaries + switch("passL", "-Wl,--no-insert-timestamp") + # increase stack size + switch("passL", "-Wl,--stack,8388608") + # https://github.com/nim-lang/Nim/issues/4057 + --tlsEmulation:off + if defined(i386): + # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM + switch("passL", "-Wl,--large-address-aware") + + # The dynamic Chronicles output currently prevents us from using colors on Windows + # because these require direct manipulations of the stdout File object. + switch("define", "chronicles_colors=off") + +# This helps especially for 32-bit x86, which sans SSE2 and newer instructions +# requires quite roundabout code generation for cryptography, and other 64-bit +# and larger arithmetic use cases, along with register starvation issues. When +# engineering a more portable binary release, this should be tweaked but still +# use at least -msse2 or -msse3. +if defined(disableMarchNative): + if defined(i386) or defined(amd64): + if defined(macosx): + switch("passC", "-march=haswell -mtune=generic") + switch("passL", "-march=haswell -mtune=generic") + else: + switch("passC", "-msse3") + switch("passL", "-msse3") +elif defined(macosx) and defined(arm64): + # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758 + switch("passC", "-mcpu=apple-m1") + switch("passL", "-mcpu=apple-m1") +else: + switch("passC", "-march=native") + switch("passL", "-march=native") + if defined(windows): + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782 + # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) + switch("passC", "-mno-avx512vl") + switch("passL", "-mno-avx512vl") + +--threads:on +--opt:speed +--excessiveStackTrace:on +# enable metric collection +--define:metrics +# for heap-usage-by-instance-type metrics and object base-type strings +--define:nimTypeNames + +switch("define", "withoutPCRE") + +# the default open files limit is too low on macOS (512), breaking the +# "--debugger:native" build. It can be increased with `ulimit -n 1024`. 
+if not defined(macosx): + # add debugging symbols and original files and line numbers + --debugger:native + if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): + # light-weight stack traces using libbacktrace and libunwind + --define:nimStackTraceOverride + switch("import", "libbacktrace") + +--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9 + +# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" +switch("warning", "CaseTransition:off") + +# The compiler doth protest too much, methinks, about all these cases where it can't +# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230 +switch("warning", "ObservableStores:off") + +# Too many false positives for "Warning: method has lock level , but another method has 0 [LockLevel]" +switch("warning", "LockLevel:off") + diff --git a/env.sh b/env.sh new file mode 100644 index 0000000..f90ba9a --- /dev/null +++ b/env.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file +# and we fall back to a Zsh-specific special var to also support Zsh. +REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})" +ABS_PATH="$(cd ${REL_PATH}; pwd)" +source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh + diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim new file mode 100644 index 0000000..b92be92 --- /dev/null +++ b/tests/all_tests_waku.nim @@ -0,0 +1,10 @@ +import + # Waku - Whisper common whisper_types test + ./whisper/test_shh, + # Waku v1 tests + ./waku/test_waku_connect, + ./waku/test_waku_config, + ./waku/test_waku_bridge, + ./waku/test_waku_mail, + ./waku/test_rpc_waku + diff --git a/tests/all_tests_whisper.nim b/tests/all_tests_whisper.nim new file mode 100644 index 0000000..2d5b524 --- /dev/null +++ b/tests/all_tests_whisper.nim @@ -0,0 +1,5 @@ +import + # Whisper tests + ./whisper/test_shh, + ./whisper/test_shh_config, + ./whisper/test_shh_connect diff --git a/tests/nim.cfg b/tests/nim.cfg new file mode 100644 index 0000000..41783bf --- /dev/null +++ b/tests/nim.cfg @@ -0,0 +1 @@ +-d:chronicles_line_numbers diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim new file mode 100644 index 0000000..a4d2fd2 --- /dev/null +++ b/tests/test_helpers.nim @@ -0,0 +1,45 @@ +import + chronos, bearssl/rand, + eth/[keys, p2p] + +import libp2p/crypto/crypto + +var nextPort = 30303 + +proc localAddress*(port: int): Address = + let port = Port(port) + result = Address(udpPort: port, tcpPort: port, + ip: parseIpAddress("127.0.0.1")) + +proc setupTestNode*( + rng: ref HmacDrbgContext, + capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode = + let + keys1 = keys.KeyPair.random(rng[]) + address = localAddress(nextPort) + result = newEthereumNode(keys1, address, NetworkId(1), + addAllCapabilities = false, + bindUdpPort = address.udpPort, # Assume same as external + bindTcpPort = address.tcpPort, # Assume same as external + rng = rng) + nextPort.inc + for capability in capabilities: + result.addCapability capability + +# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28 +type RngWrap = object + rng: ref rand.HmacDrbgContext + +var rngVar: RngWrap + +proc getRng(): ref rand.HmacDrbgContext = + # TODO if `rngVar` is a threadvar like it should be, there are random and + # spurious compile failures on mac - this is not gcsafe but for the + # purpose of the tests, it's ok as long as 
we only use a single thread + {.gcsafe.}: + if rngVar.rng.isNil: + rngVar.rng = crypto.newRng() + rngVar.rng + +template rng*(): ref rand.HmacDrbgContext = + getRng() diff --git a/tests/waku/test_rpc_waku.nim b/tests/waku/test_rpc_waku.nim new file mode 100644 index 0000000..fbef0be --- /dev/null +++ b/tests/waku/test_rpc_waku.nim @@ -0,0 +1,237 @@ +{.used.} + +import + std/[options, os, strutils], + testutils/unittests, stew/byteutils, json_rpc/[rpcserver, rpcclient], + eth/common as eth_common, eth/[keys, p2p], + ../../waku/v1/protocol/waku_protocol, + ../../waku/v1/node/rpc/[hexstrings, rpc_types, waku, key_storage] + +template sourceDir*: string = currentSourcePath.rsplit(DirSep, 1)[0] +## Generate client convenience marshalling wrappers from forward declarations +## For testing, ethcallsigs needs to be kept in sync with ../waku/node/v1/rpc/waku +const sigPath = sourceDir / ParDir / ParDir / "waku" / "v1" / "node" / "rpc" / "wakucallsigs.nim" +createRpcSigs(RpcSocketClient, sigPath) + +proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`], + rng: ref HmacDrbgContext, ): EthereumNode = + let + keypair = KeyPair.random(rng[]) + srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303), + udpPort: Port(30303)) + + result = newEthereumNode(keypair, srvAddress, NetworkId(1), "waku test rpc", + addAllCapabilities = false, bindUdpPort = srvAddress.udpPort, bindTcpPort = srvAddress.tcpPort, rng = rng) + for capability in capabilities: + result.addCapability capability + +proc doTests {.async.} = + suite "Waku Remote Procedure Calls": + let + rng = keys.newRng() + ethNode = setupNode(Waku, rng) + rpcPort = 8545 + rpcServer = newRpcSocketServer(["localhost:" & $rpcPort]) + client = newRpcSocketClient() + keys = newKeyStorage() + + setupWakuRPC(ethNode, keys, rpcServer, rng) + rpcServer.start() + await client.connect("localhost", Port(rpcPort)) + + asyncTest "waku_version": + check await(client.waku_version()) == wakuVersionStr + asyncTest "waku_info": + let info = await client.waku_info() + check info.maxMessageSize == defaultMaxMsgSize + asyncTest "waku_setMaxMessageSize": + let testValue = 1024'u64 + check await(client.waku_setMaxMessageSize(testValue)) == true + var info = await client.waku_info() + check info.maxMessageSize == testValue + expect ValueError: + discard await(client.waku_setMaxMessageSize(defaultMaxMsgSize + 1)) + info = await client.waku_info() + check info.maxMessageSize == testValue + asyncTest "waku_setMinPoW": + let testValue = 0.0001 + check await(client.waku_setMinPoW(testValue)) == true + let info = await client.waku_info() + check info.minPow == testValue + # test "waku_markTrustedPeer": + # TODO: need to connect a peer to test + asyncTest "waku asymKey tests": + let keyID = await client.waku_newKeyPair() + check: + await(client.waku_hasKeyPair(keyID)) == true + await(client.waku_deleteKeyPair(keyID)) == true + await(client.waku_hasKeyPair(keyID)) == false + expect ValueError: + discard await(client.waku_deleteKeyPair(keyID)) + + let privkey = "0x5dc5381cae54ba3174dc0d46040fe11614d0cc94d41185922585198b4fcef9d3" + let pubkey = "0x04e5fd642a0f630bbb1e4cd7df629d7b8b019457a9a74f983c0484a045cebb176def86a54185b50bbba6bbf97779173695e92835d63109c23471e6da382f922fdb" + let keyID2 = await client.waku_addPrivateKey(privkey) + check: + await(client.waku_getPublicKey(keyID2)) == pubkey.toPublicKey + await(client.waku_getPrivateKey(keyID2)).toRaw() == privkey.toPrivateKey.toRaw() + await(client.waku_hasKeyPair(keyID2)) == true + 
await(client.waku_deleteKeyPair(keyID2)) == true + await(client.waku_hasKeyPair(keyID2)) == false + expect ValueError: + discard await(client.waku_deleteKeyPair(keyID2)) + asyncTest "waku symKey tests": + let keyID = await client.waku_newSymKey() + check: + await(client.waku_hasSymKey(keyID)) == true + await(client.waku_deleteSymKey(keyID)) == true + await(client.waku_hasSymKey(keyID)) == false + expect ValueError: + discard await(client.waku_deleteSymKey(keyID)) + + let symKey = "0x0000000000000000000000000000000000000000000000000000000000000001" + let keyID2 = await client.waku_addSymKey(symKey) + check: + await(client.waku_getSymKey(keyID2)) == symKey.toSymKey + await(client.waku_hasSymKey(keyID2)) == true + await(client.waku_deleteSymKey(keyID2)) == true + await(client.waku_hasSymKey(keyID2)) == false + expect ValueError: + discard await(client.waku_deleteSymKey(keyID2)) + + let keyID3 = await client.waku_generateSymKeyFromPassword("password") + let keyID4 = await client.waku_generateSymKeyFromPassword("password") + let keyID5 = await client.waku_generateSymKeyFromPassword("nimbus!") + check: + await(client.waku_getSymKey(keyID3)) == + await(client.waku_getSymKey(keyID4)) + await(client.waku_getSymKey(keyID3)) != + await(client.waku_getSymKey(keyID5)) + await(client.waku_hasSymKey(keyID3)) == true + await(client.waku_deleteSymKey(keyID3)) == true + await(client.waku_hasSymKey(keyID3)) == false + expect ValueError: + discard await(client.waku_deleteSymKey(keyID3)) + + # Some defaults for the filter & post tests + let + ttl = 30'u64 + topicStr = "0x12345678" + payload = "0x45879632" + # A very low target and long time so we are sure the test never fails + # because of this + powTarget = 0.001 + powTime = 1.0 + + asyncTest "waku filter create and delete": + let + topic = topicStr.toTopic() + symKeyID = await client.waku_newSymKey() + options = WakuFilterOptions(symKeyID: some(symKeyID), + topics: some(@[topic])) + filterID = await client.waku_newMessageFilter(options) + + check: + filterID.string.isValidIdentifier + await(client.waku_deleteMessageFilter(filterID)) == true + expect ValueError: + discard await(client.waku_deleteMessageFilter(filterID)) + + asyncTest "waku symKey post and filter loop": + let + topic = topicStr.toTopic() + symKeyID = await client.waku_newSymKey() + options = WakuFilterOptions(symKeyID: some(symKeyID), + topics: some(@[topic])) + filterID = await client.waku_newMessageFilter(options) + message = WakuPostMessage(symKeyID: some(symKeyID), + ttl: ttl, + topic: some(topic), + payload: payload.HexDataStr, + powTime: powTime, + powTarget: powTarget) + check: + await(client.waku_setMinPoW(powTarget)) == true + await(client.waku_post(message)) == true + + let messages = await client.waku_getFilterMessages(filterID) + check: + messages.len == 1 + messages[0].sig.isNone() + messages[0].recipientPublicKey.isNone() + messages[0].ttl == ttl + messages[0].topic == topic + messages[0].payload == hexToSeqByte(payload) + messages[0].padding.len > 0 + messages[0].pow >= powTarget + + await(client.waku_deleteMessageFilter(filterID)) == true + + asyncTest "waku asymKey post and filter loop": + let + topic = topicStr.toTopic() + privateKeyID = await client.waku_newKeyPair() + options = WakuFilterOptions(privateKeyID: some(privateKeyID)) + filterID = await client.waku_newMessageFilter(options) + pubKey = await client.waku_getPublicKey(privateKeyID) + message = WakuPostMessage(pubKey: some(pubKey), + ttl: ttl, + topic: some(topic), + payload: payload.HexDataStr, + powTime: powTime, 
+ powTarget: powTarget) + check: + await(client.waku_setMinPoW(powTarget)) == true + await(client.waku_post(message)) == true + + let messages = await client.waku_getFilterMessages(filterID) + check: + messages.len == 1 + messages[0].sig.isNone() + messages[0].recipientPublicKey.get() == pubKey + messages[0].ttl == ttl + messages[0].topic == topic + messages[0].payload == hexToSeqByte(payload) + messages[0].padding.len > 0 + messages[0].pow >= powTarget + + await(client.waku_deleteMessageFilter(filterID)) == true + + asyncTest "waku signature in post and filter loop": + let + topic = topicStr.toTopic() + symKeyID = await client.waku_newSymKey() + privateKeyID = await client.waku_newKeyPair() + pubKey = await client.waku_getPublicKey(privateKeyID) + options = WakuFilterOptions(symKeyID: some(symKeyID), + topics: some(@[topic]), + sig: some(pubKey)) + filterID = await client.waku_newMessageFilter(options) + message = WakuPostMessage(symKeyID: some(symKeyID), + sig: some(privateKeyID), + ttl: ttl, + topic: some(topic), + payload: payload.HexDataStr, + powTime: powTime, + powTarget: powTarget) + check: + await(client.waku_setMinPoW(powTarget)) == true + await(client.waku_post(message)) == true + + let messages = await client.waku_getFilterMessages(filterID) + check: + messages.len == 1 + messages[0].sig.get() == pubKey + messages[0].recipientPublicKey.isNone() + messages[0].ttl == ttl + messages[0].topic == topic + messages[0].payload == hexToSeqByte(payload) + messages[0].padding.len > 0 + messages[0].pow >= powTarget + + await(client.waku_deleteMessageFilter(filterID)) == true + + rpcServer.stop() + rpcServer.close() + +waitFor doTests() diff --git a/tests/waku/test_waku_bridge.nim b/tests/waku/test_waku_bridge.nim new file mode 100644 index 0000000..625d076 --- /dev/null +++ b/tests/waku/test_waku_bridge.nim @@ -0,0 +1,98 @@ +# +# Waku +# (c) Copyright 2019 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +{.used.} + +import + std/[sequtils, tables], + chronos, testutils/unittests, eth/p2p, eth/p2p/peer_pool, + ../../waku/whisper/whisper_protocol as whisper, + ../../waku/v1/protocol/waku_protocol as waku, + ../../waku/v1/protocol/waku_bridge, + ../test_helpers + +let safeTTL = 5'u32 +let waitInterval = waku.messageInterval + 150.milliseconds + +procSuite "Waku - Whisper bridge tests": + let rng = newRng() + # Waku Whisper node has both capabilities, listens to Whisper and Waku and + # relays traffic between the two. 
+ var + nodeWakuWhisper = setupTestNode(rng, Whisper, Waku) # This will be the bridge + nodeWhisper = setupTestNode(rng, Whisper) + nodeWaku = setupTestNode(rng, Waku) + + nodeWakuWhisper.startListening() + let bridgeNode = newNode(nodeWakuWhisper.toENode()) + nodeWakuWhisper.shareMessageQueue() + + waitFor nodeWhisper.peerPool.connectToNode(bridgeNode) + waitFor nodeWaku.peerPool.connectToNode(bridgeNode) + + asyncTest "WakuWhisper and Whisper peers connected": + check: + nodeWhisper.peerPool.connectedNodes.len() == 1 + nodeWaku.peerPool.connectedNodes.len() == 1 + + asyncTest "Whisper - Waku communication via bridge": + # topic whisper node subscribes to, waku node posts to + let topic1 = [byte 0x12, 0, 0, 0] + # topic waku node subscribes to, whisper node posts to + let topic2 = [byte 0x34, 0, 0, 0] + var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + var futures = [newFuture[int](), newFuture[int]()] + + proc handler1(msg: whisper.ReceivedMessage) = + check msg.decoded.payload == payloads[0] + futures[0].complete(1) + proc handler2(msg: waku.ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + + var filter1 = whisper.subscribeFilter(nodeWhisper, + whisper.initFilter(topics = @[topic1]), handler1) + var filter2 = waku.subscribeFilter(nodeWaku, + waku.initFilter(topics = @[topic2]), handler2) + + check: + # Message should also end up in the Whisper node's queue via the bridge + waku.postMessage(nodeWaku, ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + # Message should also end up in the Waku node's queue via the bridge + whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + nodeWhisper.protocolState(Whisper).queue.items.len == 1 + nodeWaku.protocolState(Waku).queue.items.len == 1 + + # waitInterval*2 as messages have to pass the bridge also (2 hops) + await allFutures(futures).withTimeout(waitInterval*2) + + # Relay can receive Whisper & Waku messages + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2 + nodeWakuWhisper.protocolState(Waku).queue.items.len == 2 + + # Whisper node can receive Waku messages (via bridge) + nodeWhisper.protocolState(Whisper).queue.items.len == 2 + # Waku node can receive Whisper messages (via bridge) + nodeWaku.protocolState(Waku).queue.items.len == 2 + + whisper.unsubscribeFilter(nodeWhisper, filter1) == true + waku.unsubscribeFilter(nodeWaku, filter2) == true + + # XXX: This reads a bit weird, but eh + waku.resetMessageQueue(nodeWaku) + whisper.resetMessageQueue(nodeWhisper) + # shared queue so Waku and Whisper should be set to 0 + waku.resetMessageQueue(nodeWakuWhisper) + + check: + nodeWhisper.protocolState(Whisper).queue.items.len == 0 + nodeWaku.protocolState(Waku).queue.items.len == 0 + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0 + nodeWakuWhisper.protocolState(Waku).queue.items.len == 0 diff --git a/tests/waku/test_waku_config.nim b/tests/waku/test_waku_config.nim new file mode 100644 index 0000000..b75baa1 --- /dev/null +++ b/tests/waku/test_waku_config.nim @@ -0,0 +1,65 @@ +# +# Waku +# (c) Copyright 2020 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +{.used.} + +import + std/[sequtils, options, unittest, times], + ../../waku/v1/protocol/waku_protocol + +suite "Waku envelope validation": + test "should validate and allow envelope according to config": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let 
config = WakuConfig(powRequirement: 0, bloom: some(topic.topicBloom()), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() + + let msg = initMessage(env) + check msg.allowed(config) + + test "should invalidate envelope due to ttl 0": + let ttl = 0'u32 + let topic = [byte 1, 2, 3, 4] + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to expired": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + + let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to in the future": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + + # there is currently a 2 second tolerance, hence the + 3 + let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, + topic: topic, data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should not allow envelope due to bloom filter": + let topic = [byte 1, 2, 3, 4] + let wrongTopic = [byte 9, 8, 7, 6] + let config = WakuConfig(powRequirement: 0, + bloom: some(wrongTopic.topicBloom()), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:100000 , ttl: 30, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + + let msg = initMessage(env) + check msg.allowed(config) == false diff --git a/tests/waku/test_waku_connect.nim b/tests/waku/test_waku_connect.nim new file mode 100644 index 0000000..eba35c8 --- /dev/null +++ b/tests/waku/test_waku_connect.nim @@ -0,0 +1,560 @@ +# +# Waku +# (c) Copyright 2019 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +{.used.} + +import + std/[sequtils, tables], + chronos, testutils/unittests, eth/[keys, p2p], eth/p2p/peer_pool, + ../../waku/v1/protocol/waku_protocol, + ../test_helpers + +const + safeTTL = 5'u32 + waitInterval = messageInterval + 150.milliseconds + conditionTimeoutMs = 3000.milliseconds + +proc resetMessageQueues(nodes: varargs[EthereumNode]) = + for node in nodes: + node.resetMessageQueue() + +# check on a condition until true or return a future containing false +# if timeout expires first +proc eventually(timeout: Duration, + condition: proc(): bool {.gcsafe, raises: [Defect].}): Future[bool] = + let wrappedCondition = proc(): Future[bool] {.async.} = + let f = newFuture[bool]() + while not condition(): + await sleepAsync(100.milliseconds) + f.complete(true) + return await f + return withTimeout(wrappedCondition(), timeout) + +procSuite "Waku connections": + let rng = keys.newRng() + asyncTest "Waku connections": + var + n1 = setupTestNode(rng, Waku) + n2 = setupTestNode(rng, Waku) + n3 = setupTestNode(rng, Waku) + n4 = setupTestNode(rng, Waku) + + var topics: seq[waku_protocol.Topic] + n1.protocolState(Waku).config.topics = some(topics) + n2.protocolState(Waku).config.topics = some(topics) + n3.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic]) + n4.protocolState(Waku).config.topics = none(seq[waku_protocol.Topic]) + + n1.startListening() + n3.startListening() + + let + p1 = await n2.rlpxConnect(newNode(n1.toENode())) + p2 = await n2.rlpxConnect(newNode(n3.toENode())) + p3 = await n4.rlpxConnect(newNode(n3.toENode())) + check: + p1.isErr() == true + 
p2.isErr() == false + p3.isErr() == false + + asyncTest "Filters with encryption and signing": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let encryptKeyPair = KeyPair.random(rng[]) + let signKeyPair = KeyPair.random(rng[]) + var symKey: SymKey + let topic = [byte 0x12, 0, 0, 0] + var filters: seq[string] = @[] + var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), + repeat(byte 3, 10), repeat(byte 4, 10)] + var futures = [newFuture[int](), newFuture[int](), + newFuture[int](), newFuture[int]()] + + proc handler1(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[0] or + msg.decoded.payload == payloads[1] + count += 1 + if count == 2: futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + proc handler3(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[2] or + msg.decoded.payload == payloads[3] + count += 1 + if count == 2: futures[2].complete(1) + proc handler4(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[3] + futures[3].complete(1) + + # Filters + # filter for encrypted asym + filters.add(node1.subscribeFilter(initFilter( + privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler1)) + # filter for encrypted asym + signed + filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey), + privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler2)) + # filter for encrypted sym + filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey), + topics = @[topic]), handler3)) + # filter for encrypted sym + signed + filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey), + symKey = some(symKey), topics = @[topic]), handler4)) + # Messages + check: + # encrypted asym + node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, + topic = topic, payload = payloads[0]) == true + # encrypted asym + signed + node2.postMessage(some(encryptKeyPair.pubkey), + src = some(signKeyPair.seckey), ttl = safeTTL, + topic = topic, payload = payloads[1]) == true + # encrypted sym + node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, + payload = payloads[2]) == true + # encrypted sym + signed + node2.postMessage(symKey = some(symKey), + src = some(signKeyPair.seckey), + ttl = safeTTL, topic = topic, + payload = payloads[3]) == true + + node2.protocolState(Waku).queue.items.len == 4 + + check: + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Waku).queue.items.len == 4 + + for filter in filters: + check node1.unsubscribeFilter(filter) == true + + asyncTest "Filters with topics": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic1 = [byte 0x12, 0, 0, 0] + let topic2 = [byte 0x34, 0, 0, 0] + var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[0] + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + + var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1) + var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2) + + check: + 
node2.postMessage(ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + node2.postMessage(ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + node2.protocolState(Waku).queue.items.len == 2 + + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Waku).queue.items.len == 2 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + asyncTest "Filters with PoW": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0x12, 0, 0, 0] + var payload = repeat(byte 0, 10) + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[1].complete(1) + + var filter1 = node1.subscribeFilter( + initFilter(topics = @[topic], powReq = 0), handler1) + var filter2 = node1.subscribeFilter( + initFilter(topics = @[topic], powReq = 1_000_000), handler2) + + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + + (await futures[0].withTimeout(waitInterval)) == true + (await futures[1].withTimeout(waitInterval)) == false + node1.protocolState(Waku).queue.items.len == 1 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + asyncTest "Filters with queues": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + var filter = node1.subscribeFilter(initFilter(topics = @[topic])) + for i in countdown(10, 1): + check node2.postMessage(ttl = safeTTL, topic = topic, + payload = payload) == true + + await sleepAsync(waitInterval) + check: + node1.getFilterMessages(filter).len() == 10 + node1.getFilterMessages(filter).len() == 0 + node1.unsubscribeFilter(filter) == true + + asyncTest "Local filter notify": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + + var filter = node1.subscribeFilter(initFilter(topics = @[topic])) + check: + node1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 4, 10)) == true + node1.getFilterMessages(filter).len() == 1 + node1.unsubscribeFilter(filter) == true + + asyncTest "Bloomfilter blocking": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let sendTopic1 = [byte 0x12, 0, 0, 0] + let sendTopic2 = [byte 0x34, 0, 0, 0] + let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] + let payload = repeat(byte 0, 10) + var f: Future[int] = newFuture[int]() + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == payload + f.complete(1) + var filter = node1.subscribeFilter( + initFilter(topics = filterTopics), handler) + await node1.setBloomFilter(node1.filtersToBloom()) + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic1, + payload = payload) == true + node2.protocolState(Waku).queue.items.len == 1 + + (await f.withTimeout(waitInterval)) == false + node1.protocolState(Waku).queue.items.len == 0 + + resetMessageQueues(node1, node2) + 
+ f = newFuture[int]() + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic2, + payload = payload) == true + node2.protocolState(Waku).queue.items.len == 1 + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Waku).queue.items.len == 1 + + node1.unsubscribeFilter(filter) == true + + await node1.setBloomFilter(fullBloom()) + + asyncTest "PoW blocking": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + await node1.setPowRequirement(1_000_000) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Waku).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Waku).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + await node1.setPowRequirement(0.0) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Waku).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Waku).queue.items.len == 1 + + asyncTest "Queue pruning": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + # We need a minimum TTL of 2 as when set to 1 there is a small chance that + # it is already expired after messageInterval due to rounding down of float + # to uint32 in postMessage() + let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire + for i in countdown(10, 1): + check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) + check node2.protocolState(Waku).queue.items.len == 10 + + await sleepAsync(waitInterval) + check node1.protocolState(Waku).queue.items.len == 10 + + await sleepAsync(milliseconds((lowerTTL+1)*1000)) + check node1.protocolState(Waku).queue.items.len == 0 + check node2.protocolState(Waku).queue.items.len == 0 + + asyncTest "P2P post": + var node1 = setupTestNode(rng, Waku) + var node2 = setupTestNode(rng, Waku) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + var f: Future[int] = newFuture[int]() + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == repeat(byte 4, 10) + f.complete(1) + + var filter = node1.subscribeFilter(initFilter(topics = @[topic], + allowP2P = true), handler) + # Need to be sure that node1 is added in the peerpool of node2 as + # postMessage with target will iterate over the peers + require await eventually(conditionTimeoutMs, + proc(): bool = node2.peerPool.len == 1) + check: + node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true + node2.postMessage(ttl = 10, topic = topic, + payload = repeat(byte 4, 10), + targetPeer = some(toNodeId(node1.keys.pubkey))) == true + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Waku).queue.items.len == 0 + node2.protocolState(Waku).queue.items.len == 0 + + node1.unsubscribeFilter(filter) == true + + asyncTest "Light node posting": + var ln = setupTestNode(rng, Waku) + await ln.setLightNode(true) + var fn = setupTestNode(rng, Waku) + fn.startListening() + await ln.peerPool.connectToNode(newNode(fn.toENode())) + + let topic = [byte 0, 0, 0, 0] + + check: + ln.peerPool.connectedNodes.len() == 1 + # normal post + 
ln.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 0, 10)) == true + ln.protocolState(Waku).queue.items.len == 1 + # TODO: add test on message relaying + + asyncTest "Connect two light nodes": + var ln1 = setupTestNode(rng, Waku) + var ln2 = setupTestNode(rng, Waku) + + await ln1.setLightNode(true) + await ln2.setLightNode(true) + + ln2.startListening() + let peer = await ln1.rlpxConnect(newNode(ln2.toENode())) + check peer.isErr() == true + + asyncTest "Waku set-topic-interest": + var + wakuTopicNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + + let + topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA] + topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00] + wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D] + + # Set one topic so we are not considered a full node + wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1]) + + wakuNode.startListening() + await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode())) + + # Update topic interest + check: + await setTopicInterest(wakuTopicNode, @[topic1, topic2]) + + let payload = repeat(byte 0, 10) + check: + wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload) + wakuNode.protocolState(Waku).queue.items.len == 3 + await sleepAsync(waitInterval) + check: + wakuTopicNode.protocolState(Waku).queue.items.len == 2 + + asyncTest "Waku set-minimum-pow": + var + wakuPowNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + + wakuNode.startListening() + await wakuPowNode.peerPool.connectToNode(newNode(wakuNode.toENode())) + + # Update minimum pow + await setPowRequirement(wakuPowNode, 1.0) + await sleepAsync(waitInterval) + + check: + wakuNode.peerPool.len == 1 + + # check powRequirement is updated + for peer in wakuNode.peerPool.peers: + check: + peer.state(Waku).powRequirement == 1.0 + + asyncTest "Waku set-light-node": + var + wakuLightNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + + wakuNode.startListening() + await wakuLightNode.peerPool.connectToNode(newNode(wakuNode.toENode())) + + # Update minimum pow + await setLightNode(wakuLightNode, true) + await sleepAsync(waitInterval) + + check: + wakuNode.peerPool.len == 1 + + # check lightNode is updated + for peer in wakuNode.peerPool.peers: + check: + peer.state(Waku).isLightNode + + asyncTest "Waku set-bloom-filter": + var + wakuBloomNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + bloom = fullBloom() + topics = @[[byte 0xDA, 0xDA, 0xDA, 0xAA]] + + # Set topic interest + discard await wakuBloomNode.setTopicInterest(topics) + + wakuBloomNode.startListening() + await wakuNode.peerPool.connectToNode(newNode(wakuBloomNode.toENode())) + + # Sanity check + check: + wakuNode.peerPool.len == 1 + + # check bloom filter is updated + for peer in wakuNode.peerPool.peers: + check: + peer.state(Waku).bloom == bloom + peer.state(Waku).topics == some(topics) + + let hasBloomNodeConnectedCondition = proc(): bool = + wakuBloomNode.peerPool.len == 1 + # wait for the peer to be connected on the other side + let hasBloomNodeConnected = + await eventually(conditionTimeoutMs, hasBloomNodeConnectedCondition) + # check bloom filter is updated + check: + hasBloomNodeConnected + + # disable one bit in the bloom filter + bloom[0] = 0x0 + + # and set it + await setBloomFilter(wakuBloomNode, bloom) + + let bloomFilterUpdatedCondition = proc(): bool = + for peer in 
wakuNode.peerPool.peers: + return peer.state(Waku).bloom == bloom and + peer.state(Waku).topics == none(seq[waku_protocol.Topic]) + + let bloomFilterUpdated = + await eventually(conditionTimeoutMs, bloomFilterUpdatedCondition) + # check bloom filter is updated + check: + bloomFilterUpdated + + asyncTest "Waku topic-interest": + var + wakuTopicNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + + let + topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA] + topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00] + wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D] + + wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2]) + + wakuNode.startListening() + await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode())) + + let payload = repeat(byte 0, 10) + check: + wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload) + wakuNode.protocolState(Waku).queue.items.len == 3 + + await eventually(conditionTimeoutMs, + proc (): bool = wakuTopicNode.protocolState(Waku).queue.items.len == 2) + + asyncTest "Waku topic-interest versus bloom filter": + var + wakuTopicNode = setupTestNode(rng, Waku) + wakuNode = setupTestNode(rng, Waku) + + let + topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA] + topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00] + bloomTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D] + + # It was checked that the topics don't trigger false positives on the bloom. + wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2]) + wakuTopicNode.protocolState(Waku).config.bloom = some(toBloom([bloomTopic])) + + wakuNode.startListening() + await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode())) + + let payload = repeat(byte 0, 10) + check: + wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload) + wakuNode.postMessage(ttl = safeTTL, topic = bloomTopic, payload = payload) + wakuNode.protocolState(Waku).queue.items.len == 3 + await sleepAsync(waitInterval) + check: + wakuTopicNode.protocolState(Waku).queue.items.len == 2 diff --git a/tests/waku/test_waku_mail.nim b/tests/waku/test_waku_mail.nim new file mode 100644 index 0000000..3119ba2 --- /dev/null +++ b/tests/waku/test_waku_mail.nim @@ -0,0 +1,119 @@ +{.used.} + +import + std/[tables, sequtils, times], + chronos, testutils/unittests, eth/[p2p, async_utils], eth/p2p/peer_pool, + ../../waku/v1/protocol/[waku_protocol, waku_mail], + ../test_helpers + +const + transmissionTimeout = chronos.milliseconds(100) + +proc waitForConnected(node: EthereumNode) {.async.} = + while node.peerPool.connectedNodes.len == 0: + await sleepAsync(chronos.milliseconds(1)) + +procSuite "Waku Mail Client": + let rng = newRng() + var client = setupTestNode(rng, Waku) + var simpleServer = setupTestNode(rng, Waku) + + simpleServer.startListening() + let simpleServerNode = newNode(simpleServer.toENode()) + let clientNode = newNode(client.toENode()) + waitFor client.peerPool.connectToNode(simpleServerNode) + require: + waitFor simpleServer.waitForConnected().withTimeout(transmissionTimeout) + + asyncTest "Two peers connected": + check: + client.peerPool.connectedNodes.len() == 1 + simpleServer.peerPool.connectedNodes.len() == 1 + + asyncTest "Mail Request and Request Complete": + let + topic = [byte 0, 0, 0, 0] + bloom = toBloom(@[topic]) + lower = 0'u32 + upper = epochTime().uint32 + limit = 100'u32 + request = 
MailRequest(lower: lower, upper: upper, bloom: @bloom, + limit: limit) + + var symKey: SymKey + check client.setPeerTrusted(simpleServerNode.id) + var cursorFut = client.requestMail(simpleServerNode.id, request, symKey, 1) + + # Simple mailserver part + let peer = simpleServer.peerPool.connectedNodes[clientNode] + var f: Future[Waku.p2pRequest] = peer.nextMsg(Waku.p2pRequest) + require await f.withTimeout(transmissionTimeout) + let response = f.read() + let decoded = decode(response.envelope.data, symKey = some(symKey)) + require decoded.isSome() + + var rlp = rlpFromBytes(decoded.get().payload) + let output = rlp.read(MailRequest) + check: + output.lower == lower + output.upper == upper + output.bloom == bloom + output.limit == limit + + var dummy: Hash + await peer.p2pRequestComplete(dummy, dummy, @[]) + + check await cursorFut.withTimeout(transmissionTimeout) + + asyncTest "Mail Send": + let topic = [byte 0x12, 0x34, 0x56, 0x78] + let payload = repeat(byte 0, 10) + var f = newFuture[int]() + + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == payload + f.complete(1) + + let filter = subscribeFilter(client, + initFilter(topics = @[topic], allowP2P = true), handler) + + check: + client.setPeerTrusted(simpleServerNode.id) + # ttl 0 to show that ttl should be ignored + # TODO: perhaps not the best way to test this, means no PoW calculation + # may be done, and not sure if that is OK? + simpleServer.postMessage(ttl = 0, topic = topic, payload = payload, + targetPeer = some(clientNode.id)) + + await f.withTimeout(transmissionTimeout) + + client.unsubscribeFilter(filter) + + asyncTest "Multiple Client Request and Complete": + var count = 5 + proc customHandler(peer: Peer, envelope: Envelope)= + var envelopes: seq[Envelope] + traceAsyncErrors peer.p2pMessage(envelopes) + + var cursor: seq[byte] + count = count - 1 + if count == 0: + cursor = @[] + else: + cursor = @[byte count] + + var dummy: Hash + traceAsyncErrors peer.p2pRequestComplete(dummy, dummy, cursor) + + simpleServer.registerP2PRequestHandler(customHandler) + check client.setPeerTrusted(simpleServerNode.id) + var request: MailRequest + var symKey: SymKey + let cursor = + await client.requestMail(simpleServerNode.id, request, symKey, 5) + require cursor.isSome() + check: + cursor.get().len == 0 + count == 0 + + # TODO: Also check for received envelopes. 
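Editorial sketch (not part of the patch): the mail tests above exercise both ends of the exchange, where the client encrypts an RLP-encoded MailRequest symmetrically and sends it as a p2pRequest via requestMail, and the server decodes it and eventually answers with p2pRequestComplete, an empty cursor meaning the request is fully served. Below is a minimal server-side sketch of that flow built only from the calls exercised in these tests; the shared symKey being agreed out of band and the import paths following this patch's layout are assumptions.

import
  std/options,
  chronos, eth/[p2p, rlp, async_utils],
  ../../waku/v1/protocol/[waku_protocol, waku_mail]

proc runSimpleMailServer*(server: EthereumNode, symKey: SymKey) =
  ## Registers a bare-bones p2pRequest handler that parses incoming
  ## MailRequests and immediately reports completion.
  proc mailHandler(peer: Peer, envelope: Envelope) =
    # The request payload is a symmetrically encrypted, RLP-encoded MailRequest.
    let decoded = decode(envelope.data, symKey = some(symKey))
    if decoded.isNone():
      return
    var r = rlpFromBytes(decoded.get().payload)
    let request = r.read(MailRequest)

    # A real mail server would now select stored envelopes with expiry in
    # [request.lower, request.upper], matching request.bloom and capped at
    # request.limit, and stream them back as p2pMessage batches.
    var envelopes: seq[Envelope]
    traceAsyncErrors peer.p2pMessage(envelopes)

    # An empty cursor tells the client that no further pages remain.
    var dummy: Hash
    traceAsyncErrors peer.p2pRequestComplete(dummy, dummy, @[])

  server.registerP2PRequestHandler(mailHandler)

The client side then mirrors "Mail Request and Request Complete" above: mark the server trusted with setPeerTrusted and await requestMail(serverId, request, symKey, limit), repeating while the returned cursor is non-empty.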
diff --git a/tests/whisper/test_shh.nim b/tests/whisper/test_shh.nim new file mode 100644 index 0000000..0b70d3d --- /dev/null +++ b/tests/whisper/test_shh.nim @@ -0,0 +1,382 @@ +# +# Ethereum P2P +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import + std/[sequtils, options, unittest, tables], + nimcrypto/hash, + eth/[keys, rlp], + ../../waku/whisper/whisper_types as whisper + +let rng = newRng() + +suite "Whisper payload": + test "should roundtrip without keys": + let payload = Payload(payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should roundtrip with symmetric encryption": + var symKey: SymKey + let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get(), symKey = some(symKey)) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should roundtrip with signature": + let privKey = PrivateKey.random(rng[]) + + let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.toPublicKey() == decoded.get().src.get() + decoded.get().padding.get().len == 186 # 256 -1 -1 -3 -65 + + test "should roundtrip with asymmetric encryption": + let privKey = PrivateKey.random(rng[]) + + let payload = Payload(dst: some(privKey.toPublicKey()), + payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get(), dst = some(privKey)) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should return specified bloom": + # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/whisper/whisperv6/whisper_test.go#L834 + let top0 = [byte 0, 0, 255, 6] + var x: Bloom + x[0] = byte 1 + x[32] = byte 1 + x[^1] = byte 128 + check @(top0.topicBloom) == @x + +suite "Whisper payload padding": + test "should do max padding": + let payload = Payload(payload: repeat(byte 1, 254)) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + decoded.get().padding.get().len == 256 # as dataLen == 256 + + test "should do max padding with signature": + let privKey = PrivateKey.random(rng[]) + + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 189)) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.toPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + decoded.get().padding.get().len == 256 # as dataLen == 256 + + test "should do min padding": + let payload = Payload(payload: repeat(byte 1, 253)) + let encoded = whisper.encode(rng[], payload) + + let decoded 
= whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + decoded.get().padding.get().len == 1 # as dataLen == 255 + + test "should do min padding with signature": + let privKey = PrivateKey.random(rng[]) + + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188)) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.toPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + decoded.get().padding.get().len == 1 # as dataLen == 255 + + test "should roundtrip custom padding": + let payload = Payload(payload: repeat(byte 1, 10), + padding: some(repeat(byte 2, 100))) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + payload.padding.get() == decoded.get().padding.get() + + test "should roundtrip custom 0 padding": + let padding: seq[byte] = @[] + let payload = Payload(payload: repeat(byte 1, 10), + padding: some(padding)) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isNone() + + test "should roundtrip custom padding with signature": + let privKey = PrivateKey.random(rng[]) + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), + padding: some(repeat(byte 2, 100))) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.toPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + payload.padding.get() == decoded.get().padding.get() + + test "should roundtrip custom 0 padding with signature": + let padding: seq[byte] = @[] + let privKey = PrivateKey.random(rng[]) + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), + padding: some(padding)) + let encoded = whisper.encode(rng[], payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.toPublicKey() == decoded.get().src.get() + decoded.get().padding.isNone() + +# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/whisper/src/message.rs#L439 +let + env0 = Envelope( + expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], + data: repeat(byte 9, 256), nonce: 1010101) + env1 = Envelope( + expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], + data: repeat(byte 9, 256), nonce: 1010102) + env2 = Envelope( + expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], + data: repeat(byte 9, 256), nonce: 1010103) + +suite "Whisper envelope": + + proc hashAndPow(env: Envelope): (string, float64) = + # This is the current implementation of go-ethereum + let size = env.toShortRlp().len().uint32 + # This is our current implementation in `whisper_protocol.nim` + # let size = env.len().uint32 + # This is the EIP-627 specification + # let size = env.toRlp().len().uint32 + let hash = env.calcPowHash() + ($hash, calcPow(size, env.ttl, hash)) + + test "PoW calculation leading zeroes tests": + # Test values from Parity, in message.rs + let testHashes = [ + # 256 leading zeroes + "0x0000000000000000000000000000000000000000000000000000000000000000", + # 255 leading 
zeroes + "0x0000000000000000000000000000000000000000000000000000000000000001", + # no leading zeroes + "0xff00000000000000000000000000000000000000000000000000000000000000" + ] + check: + calcPow(1, 1, Hash.fromHex(testHashes[0])) == + 115792089237316200000000000000000000000000000000000000000000000000000000000000.0 + calcPow(1, 1, Hash.fromHex(testHashes[1])) == + 57896044618658100000000000000000000000000000000000000000000000000000000000000.0 + calcPow(1, 1, Hash.fromHex(testHashes[2])) == 1.0 + + # Test values from go-ethereum whisperv6 in envelope_test + var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef]) + # PoW calculation with no leading zeroes + env.nonce = 100000 + check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6", + 0.07692307692307693) + # PoW calculation with 8 leading zeroes + env.nonce = 276 + check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C", + 19.692307692307693) + +suite "Whisper queue": + test "should throw out lower proof-of-work item when full": + var queue = initQueue(1) + + let msg0 = initMessage(env0) + let msg1 = initMessage(env1) + + discard queue.add(msg0) + discard queue.add(msg1) + + check: + queue.items.len() == 1 + queue.items[0].env.nonce == + (if msg0.pow > msg1.pow: msg0.env.nonce else: msg1.env.nonce) + + test "should not throw out messages as long as there is capacity": + var queue = initQueue(2) + + check: + queue.add(initMessage(env0)) == true + queue.add(initMessage(env1)) == true + + queue.items.len() == 2 + + test "check if order of queue is by decreasing PoW": + var queue = initQueue(3) + + let msg0 = initMessage(env0) + let msg1 = initMessage(env1) + let msg2 = initMessage(env2) + + discard queue.add(msg0) + discard queue.add(msg1) + discard queue.add(msg2) + + check: + queue.items.len() == 3 + queue.items[0].pow > queue.items[1].pow and + queue.items[1].pow > queue.items[2].pow + + test "check field order against expected rlp order": + check rlp.encode(env0) == + rlp.encodeList(env0.expiry, env0.ttl, env0.topic, env0.data, env0.nonce) + +# To test filters we do not care if the msg is valid or allowed +proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](), + src = none[PrivateKey](), topic: Topic, + padding = none[seq[byte]]()): Message = + let payload = Payload(dst: pubKey, symKey: symKey, src: src, + payload: @[byte 0, 1, 2], padding: padding) + let encoded = whisper.encode(rng[], payload) + let env = Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(), + nonce: 0) + result = initMessage(env) + +suite "Whisper filter": + test "should notify filter on message with symmetric encryption": + var symKey: SymKey + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(symKey = some(symKey), topic = topic) + + var filters = initTable[string, Filter]() + let filter = initFilter(symKey = some(symKey), topics = @[topic]) + let filterId = subscribeFilter(rng[], filters, filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isNone() + messages[0].dst.isNone() + + test "should notify filter on message with asymmetric encryption": + let privKey = PrivateKey.random(rng[]) + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(pubKey = some(privKey.toPublicKey()), + topic = topic) + + var filters = initTable[string, Filter]() + let filter = initFilter(privateKey = some(privKey), topics = @[topic]) + let filterId = subscribeFilter(rng[], 
filters, filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isNone() + messages[0].dst.isSome() + + test "should notify filter on message with signature": + let privKey = PrivateKey.random(rng[]) + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(src = some(privKey), topic = topic) + + var filters = initTable[string, Filter]() + let filter = initFilter(src = some(privKey.toPublicKey()), + topics = @[topic]) + let filterId = subscribeFilter(rng[], filters, filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isSome() + messages[0].dst.isNone() + + test "test notify of filter against PoW requirement": + let topic = [byte 0, 0, 0, 0] + let padding = some(repeat(byte 0, 251)) + # this message has a PoW of 0.02962962962962963, number should be updated + # in case PoW algorithm changes or contents of padding, payload, topic, etc. + # update: now with NON rlp encoded envelope size the PoW of this message is + # 0.014492753623188406 + let msg = prepFilterTestMsg(topic = topic, padding = padding) + + var filters = initTable[string, Filter]() + let + filterId1 = subscribeFilter(rng[], filters, + initFilter(topics = @[topic], powReq = 0.014492753623188406)) + filterId2 = subscribeFilter(rng[], filters, + initFilter(topics = @[topic], powReq = 0.014492753623188407)) + + notify(filters, msg) + + check: + filters.getFilterMessages(filterId1).len == 1 + filters.getFilterMessages(filterId2).len == 0 + + test "test notify of filter on message with certain topic": + let + topic1 = [byte 0xAB, 0x12, 0xCD, 0x34] + topic2 = [byte 0, 0, 0, 0] + + let msg = prepFilterTestMsg(topic = topic1) + + var filters = initTable[string, Filter]() + let + filterId1 = subscribeFilter(rng[], filters, initFilter(topics = @[topic1])) + filterId2 = subscribeFilter(rng[], filters, initFilter(topics = @[topic2])) + + notify(filters, msg) + + check: + filters.getFilterMessages(filterId1).len == 1 + filters.getFilterMessages(filterId2).len == 0 diff --git a/tests/whisper/test_shh_config.nim b/tests/whisper/test_shh_config.nim new file mode 100644 index 0000000..ce470ba --- /dev/null +++ b/tests/whisper/test_shh_config.nim @@ -0,0 +1,71 @@ +# +# Ethereum P2P +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import + std/[sequtils, unittest, times], + ../../waku/whisper/whisper_protocol as whisper + +suite "Whisper envelope validation": + test "should validate and allow envelope according to config": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() + + let msg = initMessage(env) + check msg.allowed(config) + + test "should invalidate envelope due to ttl 0": + let ttl = 0'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to expired": + let ttl = 
1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to in the future": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + # there is currently a 2 second tolerance, hence the + 3 + let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should not allow envelope due to bloom filter": + let topic = [byte 1, 2, 3, 4] + let wrongTopic = [byte 9, 8, 7, 6] + let config = WhisperConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:100000 , ttl: 30, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + + let msg = initMessage(env) + check msg.allowed(config) == false diff --git a/tests/whisper/test_shh_connect.nim b/tests/whisper/test_shh_connect.nim new file mode 100644 index 0000000..1f5161d --- /dev/null +++ b/tests/whisper/test_shh_connect.nim @@ -0,0 +1,329 @@ +# +# Ethereum P2P +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import + std/[sequtils, options, tables], + chronos, testutils/unittests, bearssl, + eth/[keys, p2p], eth/p2p/peer_pool, + ../../waku/whisper/whisper_protocol as whisper, + ../test_helpers + +proc resetMessageQueues(nodes: varargs[EthereumNode]) = + for node in nodes: + node.resetMessageQueue() + +let safeTTL = 5'u32 +let waitInterval = messageInterval + 150.milliseconds + +procSuite "Whisper connections": + let rng = newRng() + var node1 = setupTestNode(rng, Whisper) + var node2 = setupTestNode(rng, Whisper) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(node2.toENode())) + asyncTest "Two peers connected": + check: + node1.peerPool.connectedNodes.len() == 1 + + asyncTest "Filters with encryption and signing": + let encryptKeyPair = KeyPair.random(rng[]) + let signKeyPair = KeyPair.random(rng[]) + var symKey: SymKey + let topic = [byte 0x12, 0, 0, 0] + var filters: seq[string] = @[] + var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), + repeat(byte 3, 10), repeat(byte 4, 10)] + var futures = [newFuture[int](), newFuture[int](), + newFuture[int](), newFuture[int]()] + + proc handler1(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1] + count += 1 + if count == 2: futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + proc handler3(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3] + count += 1 + if count == 2: futures[2].complete(1) + proc handler4(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[3] + futures[3].complete(1) + + # Filters + # filter for encrypted asym + filters.add(node1.subscribeFilter(initFilter(privateKey = some(encryptKeyPair.seckey), + topics = @[topic]), handler1)) + # filter for encrypted asym + signed + 
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey), + privateKey = some(encryptKeyPair.seckey), + topics = @[topic]), handler2)) + # filter for encrypted sym + filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey), + topics = @[topic]), handler3)) + # filter for encrypted sym + signed + filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey), + symKey = some(symKey), + topics = @[topic]), handler4)) + # Messages + check: + # encrypted asym + node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, + topic = topic, payload = payloads[0]) == true + # encrypted asym + signed + node2.postMessage(some(encryptKeyPair.pubkey), + src = some(signKeyPair.seckey), ttl = safeTTL, + topic = topic, payload = payloads[1]) == true + # encrypted sym + node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, + payload = payloads[2]) == true + # encrypted sym + signed + node2.postMessage(symKey = some(symKey), + src = some(signKeyPair.seckey), + ttl = safeTTL, topic = topic, + payload = payloads[3]) == true + + node2.protocolState(Whisper).queue.items.len == 4 + + check: + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Whisper).queue.items.len == 4 + + for filter in filters: + check node1.unsubscribeFilter(filter) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with topics": + let topic1 = [byte 0x12, 0, 0, 0] + let topic2 = [byte 0x34, 0, 0, 0] + var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[0] + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + + var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1) + var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2) + + check: + node2.postMessage(ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + node2.postMessage(ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + node2.protocolState(Whisper).queue.items.len == 2 + + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Whisper).queue.items.len == 2 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with PoW": + let topic = [byte 0x12, 0, 0, 0] + var payload = repeat(byte 0, 10) + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[1].complete(1) + + var filter1 = node1.subscribeFilter(initFilter(topics = @[topic], powReq = 0), + handler1) + var filter2 = node1.subscribeFilter(initFilter(topics = @[topic], + powReq = 1_000_000), handler2) + + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + + (await futures[0].withTimeout(waitInterval)) == true + (await futures[1].withTimeout(waitInterval)) == false + node1.protocolState(Whisper).queue.items.len == 1 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with queues": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + var filter = node1.subscribeFilter(initFilter(topics = @[topic])) + for i in 
countdown(10, 1): + check node2.postMessage(ttl = safeTTL, topic = topic, + payload = payload) == true + + await sleepAsync(waitInterval) + check: + node1.getFilterMessages(filter).len() == 10 + node1.getFilterMessages(filter).len() == 0 + node1.unsubscribeFilter(filter) == true + + resetMessageQueues(node1, node2) + + asyncTest "Local filter notify": + let topic = [byte 0, 0, 0, 0] + + var filter = node1.subscribeFilter(initFilter(topics = @[topic])) + check: + node1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 4, 10)) == true + node1.getFilterMessages(filter).len() == 1 + node1.unsubscribeFilter(filter) == true + + await sleepAsync(waitInterval) + resetMessageQueues(node1, node2) + + asyncTest "Bloomfilter blocking": + let sendTopic1 = [byte 0x12, 0, 0, 0] + let sendTopic2 = [byte 0x34, 0, 0, 0] + let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] + let payload = repeat(byte 0, 10) + var f: Future[int] = newFuture[int]() + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == payload + f.complete(1) + var filter = node1.subscribeFilter(initFilter(topics = filterTopics), handler) + await node1.setBloomFilter(node1.filtersToBloom()) + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic1, + payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + + (await f.withTimeout(waitInterval)) == false + node1.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + f = newFuture[int]() + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic2, + payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Whisper).queue.items.len == 1 + + node1.unsubscribeFilter(filter) == true + + await node1.setBloomFilter(fullBloom()) + + resetMessageQueues(node1, node2) + + asyncTest "PoW blocking": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + await node1.setPowRequirement(1_000_000) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + await node1.setPowRequirement(0.0) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Whisper).queue.items.len == 1 + + resetMessageQueues(node1, node2) + + asyncTest "Queue pruning": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + # We need a minimum TTL of 2 as when set to 1 there is a small chance that + # it is already expired after messageInterval due to rounding down of float + # to uint32 in postMessage() + let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire + for i in countdown(10, 1): + check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true + check node2.protocolState(Whisper).queue.items.len == 10 + + await sleepAsync(waitInterval) + check node1.protocolState(Whisper).queue.items.len == 10 + + await sleepAsync(milliseconds((lowerTTL+1)*1000)) + check node1.protocolState(Whisper).queue.items.len == 0 + check node2.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + asyncTest "P2P post": + let topic = [byte 0, 0, 0, 0] + var f: Future[int] = newFuture[int]() + proc 
handler(msg: ReceivedMessage) = + check msg.decoded.payload == repeat(byte 4, 10) + f.complete(1) + + var filter = node1.subscribeFilter(initFilter(topics = @[topic], + allowP2P = true), handler) + check: + node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true + node2.postMessage(ttl = 10, topic = topic, + payload = repeat(byte 4, 10), + targetPeer = some(toNodeId(node1.keys.pubkey))) == true + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Whisper).queue.items.len == 0 + node2.protocolState(Whisper).queue.items.len == 0 + + node1.unsubscribeFilter(filter) == true + + asyncTest "Light node posting": + var ln1 = setupTestNode(rng, Whisper) + ln1.setLightNode(true) + + await ln1.peerPool.connectToNode(newNode(node2.toENode())) + + let topic = [byte 0, 0, 0, 0] + + check: + # normal post + ln1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 0, 10)) == false + ln1.protocolState(Whisper).queue.items.len == 0 + # P2P post + ln1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 0, 10), + targetPeer = some(toNodeId(node2.keys.pubkey))) == true + ln1.protocolState(Whisper).queue.items.len == 0 + + asyncTest "Connect two light nodes": + var ln1 = setupTestNode(rng, Whisper) + var ln2 = setupTestNode(rng, Whisper) + + ln1.setLightNode(true) + ln2.setLightNode(true) + + ln2.startListening() + let peer = await ln1.rlpxConnect(newNode(ln2.toENode())) + check peer.isErr() == true diff --git a/vendor/nim-bearssl b/vendor/nim-bearssl new file mode 160000 index 0000000..9372f27 --- /dev/null +++ b/vendor/nim-bearssl @@ -0,0 +1 @@ +Subproject commit 9372f27a25d0718d3527afad6cc936f6a853f86e diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles new file mode 160000 index 0000000..af31ca2 --- /dev/null +++ b/vendor/nim-chronicles @@ -0,0 +1 @@ +Subproject commit af31ca2157ed5c65ed339a0bbe5bed6faa033502 diff --git a/vendor/nim-chronos b/vendor/nim-chronos new file mode 160000 index 0000000..02fda01 --- /dev/null +++ b/vendor/nim-chronos @@ -0,0 +1 @@ +Subproject commit 02fda01bf260a16d70e5b827819b5314542d5ecd diff --git a/vendor/nim-confutils b/vendor/nim-confutils new file mode 160000 index 0000000..1f3acaf --- /dev/null +++ b/vendor/nim-confutils @@ -0,0 +1 @@ +Subproject commit 1f3acaf6e968ea8e4ec3eec177aebd50eef1040c diff --git a/vendor/nim-eth b/vendor/nim-eth new file mode 160000 index 0000000..55b9da0 --- /dev/null +++ b/vendor/nim-eth @@ -0,0 +1 @@ +Subproject commit 55b9da0bea5dde89a160d43cdc469210dd481720 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams new file mode 160000 index 0000000..5839747 --- /dev/null +++ b/vendor/nim-faststreams @@ -0,0 +1 @@ +Subproject commit 583974782f1d5487e16cc72289cd97e8897bc894 diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils new file mode 160000 index 0000000..aad684d --- /dev/null +++ b/vendor/nim-http-utils @@ -0,0 +1 @@ +Subproject commit aad684d3758a74c1b327df93da2e956458410b48 diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc new file mode 160000 index 0000000..f79be14 --- /dev/null +++ b/vendor/nim-json-rpc @@ -0,0 +1 @@ +Subproject commit f79be14c997092e29ba1edf706bb15a238fb37a5 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization new file mode 160000 index 0000000..ed4440d --- /dev/null +++ b/vendor/nim-json-serialization @@ -0,0 +1 @@ +Subproject commit ed4440d881f9e2cb7778c01a0f638d928f339aa7 diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace new file mode 160000 index 0000000..06380d5 --- /dev/null +++ b/vendor/nim-libbacktrace 
@@ -0,0 +1 @@ +Subproject commit 06380d57ff587793fd564e4da76d287c03e59056 diff --git a/vendor/nim-metrics b/vendor/nim-metrics new file mode 160000 index 0000000..abf3acc --- /dev/null +++ b/vendor/nim-metrics @@ -0,0 +1 @@ +Subproject commit abf3acc7f06cee9ee2c287d2f31413dc3df4c04e diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal new file mode 160000 index 0000000..802d75e --- /dev/null +++ b/vendor/nim-nat-traversal @@ -0,0 +1 @@ +Subproject commit 802d75edcc656e616120fb27f950ff1285ddcbba diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1 new file mode 160000 index 0000000..5fd8135 --- /dev/null +++ b/vendor/nim-secp256k1 @@ -0,0 +1 @@ +Subproject commit 5fd81357839d57ef38fb17647bd5e31dfa9f55b8 diff --git a/vendor/nim-serialization b/vendor/nim-serialization new file mode 160000 index 0000000..bc46b4c --- /dev/null +++ b/vendor/nim-serialization @@ -0,0 +1 @@ +Subproject commit bc46b4c1c1730cc25bf5fb5f3d64bd708a6ad89e diff --git a/vendor/nim-stew b/vendor/nim-stew new file mode 160000 index 0000000..7b4c940 --- /dev/null +++ b/vendor/nim-stew @@ -0,0 +1 @@ +Subproject commit 7b4c9407f29075d3206123c1a2d87fa74af40fd0 diff --git a/vendor/nim-stint b/vendor/nim-stint new file mode 160000 index 0000000..94fc521 --- /dev/null +++ b/vendor/nim-stint @@ -0,0 +1 @@ +Subproject commit 94fc521ee0f1e113d09ceeaa3568d4d7a6c0b67d diff --git a/vendor/nim-testutils b/vendor/nim-testutils new file mode 160000 index 0000000..53a9fb0 --- /dev/null +++ b/vendor/nim-testutils @@ -0,0 +1 @@ +Subproject commit 53a9fb09923004455b7e6d750c8144f3c7ede201 diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 new file mode 160000 index 0000000..5ed05c9 --- /dev/null +++ b/vendor/nim-unittest2 @@ -0,0 +1 @@ +Subproject commit 5ed05c90fb54f5e1c41004f91fa57260bdfb6133 diff --git a/vendor/nim-websock b/vendor/nim-websock new file mode 160000 index 0000000..494afad --- /dev/null +++ b/vendor/nim-websock @@ -0,0 +1 @@ +Subproject commit 494afadb86628fc365693f9faa3d61b95990bad8 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system new file mode 160000 index 0000000..cb65dbc --- /dev/null +++ b/vendor/nimbus-build-system @@ -0,0 +1 @@ +Subproject commit cb65dbcd80126128f216d668dafed31b91879145 diff --git a/vendor/nimcrypto b/vendor/nimcrypto new file mode 160000 index 0000000..4014ef9 --- /dev/null +++ b/vendor/nimcrypto @@ -0,0 +1 @@ +Subproject commit 4014ef939b51e02053c2e16dd3481d47bc9267dd diff --git a/waku.nimble b/waku.nimble new file mode 100644 index 0000000..761ceae --- /dev/null +++ b/waku.nimble @@ -0,0 +1,74 @@ +mode = ScriptMode.Verbose + +### Package +version = "0.1.0" +author = "Status Research & Development GmbH" +description = "Waku, Private P2P Messaging for Resource-Restricted Devices" +license = "MIT or Apache License 2.0" +#bin = @["build/waku"] + +### Dependencies +requires "nim >= 1.6.0", + "chronicles", + "confutils", + "chronos", + "eth", + "json_rpc", + "libbacktrace", + "nimcrypto", + "stew", + "stint", + "metrics", + "web3", + "presto", + "regex" + +### Helper functions +proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + for i in 2.. NIM_PARAMS="-d:chronicles_log_level=INFO" make test2` + # I expect compiler flag to be overridden, however it stays with whatever is + # specified here. 
+ buildBinary name, "tests/", params + exec "build/" & name + +### Legacy: Whisper & Waku v1 tasks +task testwhisper, "Build & run Whisper tests": + test "all_tests_whisper", "-d:chronicles_log_level=WARN -d:chronosStrictException" + +task wakunode1, "Build Waku v1 cli node": + buildBinary "wakunode1", "waku/node/", + "-d:chronicles_log_level=DEBUG -d:chronosStrictException" + +task sim1, "Build Waku v1 simulation tools": + buildBinary "quicksim", "waku/node/", + "-d:chronicles_log_level=INFO -d:chronosStrictException" + buildBinary "start_network", "waku/node/", + "-d:chronicles_log_level=DEBUG -d:chronosStrictException" + +task example1, "Build Waku v1 example": + buildBinary "example", "examples/", + "-d:chronicles_log_level=DEBUG -d:chronosStrictException" + +task test1, "Build & run Waku v1 tests": + test "all_tests_waku", "-d:chronicles_log_level=WARN -d:chronosStrictException" diff --git a/waku/common/utils/nat.nim b/waku/common/utils/nat.nim new file mode 100644 index 0000000..de36ffc --- /dev/null +++ b/waku/common/utils/nat.nim @@ -0,0 +1,67 @@ + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[options, strutils] +import + chronicles, + eth/net/nat, + stew/results, + stew/shims/net, + nativesockets + +logScope: + topics = "nat" + +proc setupNat*(natConf, clientId: string, + tcpPort, udpPort: Port): + Result[tuple[ip: Option[ValidIpAddress], + tcpPort: Option[Port], + udpPort: Option[Port]], string] + {.gcsafe.} = + + let strategy = case natConf.toLowerAscii(): + of "any": NatAny + of "none": NatNone + of "upnp": NatUpnp + of "pmp": NatPmp + else: NatNone + + var endpoint: tuple[ip: Option[ValidIpAddress], tcpPort: Option[Port], udpPort: Option[Port]] + + if strategy != NatNone: + let extIp = getExternalIP(strategy) + if extIP.isSome(): + endpoint.ip = some(ValidIpAddress.init(extIp.get())) + # RedirectPorts in considered a gcsafety violation + # because it obtains the address of a non-gcsafe proc? + var extPorts: Option[(Port, Port)] + try: + extPorts = ({.gcsafe.}: redirectPorts(tcpPort = tcpPort, + udpPort = udpPort, + description = clientId)) + except CatchableError: + # TODO: nat.nim Error: can raise an unlisted exception: Exception. Isolate here for now. + error "unable to determine external ports" + extPorts = none((Port, Port)) + + if extPorts.isSome(): + let (extTcpPort, extUdpPort) = extPorts.get() + endpoint.tcpPort = some(extTcpPort) + endpoint.udpPort = some(extUdpPort) + + else: # NatNone + if not natConf.startsWith("extip:"): + return err("not a valid NAT mechanism: " & $natConf) + + try: + # any required port redirection is assumed to be done by hand + endpoint.ip = some(ValidIpAddress.init(natConf[6..^1])) + except ValueError: + return err("not a valid IP address: " & $natConf[6..^1]) + + return ok(endpoint) + diff --git a/waku/node/README.md b/waku/node/README.md new file mode 100644 index 0000000..74d040f --- /dev/null +++ b/waku/node/README.md @@ -0,0 +1,3 @@ +# Waku Node v1 + +This folder contains code related to running a `wakunode` process. The main entrypoint is the `wakunode` file. diff --git a/waku/node/config.nim b/waku/node/config.nim new file mode 100644 index 0000000..78f572d --- /dev/null +++ b/waku/node/config.nim @@ -0,0 +1,164 @@ +import + confutils/defs, chronicles, chronos, eth/keys + +type + Fleet* = enum + none + prod + staging + test + + WakuNodeCmd* = enum + noCommand + genNodekey + + WakuNodeConf* = object + logLevel* {. + desc: "Sets the log level." 
+ defaultValue: LogLevel.INFO + name: "log-level" .}: LogLevel + + case cmd* {. + command + defaultValue: noCommand .}: WakuNodeCmd + + of noCommand: + tcpPort* {. + desc: "TCP listening port." + defaultValue: 30303 + name: "tcp-port" .}: uint16 + + portsShift* {. + desc: "Add a shift to all port numbers." + defaultValue: 0 + name: "ports-shift" .}: uint16 + + nat* {. + desc: "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:." + defaultValue: "any" .}: string + + discovery* {. + desc: "Enable/disable discovery v4." + defaultValue: true + name: "discovery" .}: bool + + noListen* {. + desc: "Disable listening for incoming peers." + defaultValue: false + name: "no-listen" .}: bool + + fleet* {. + desc: "Select the fleet to connect to." + defaultValue: Fleet.none + name: "fleet" .}: Fleet + + bootnodes* {. + desc: "Enode URL to bootstrap P2P discovery with. Argument may be repeated." + name: "bootnode" .}: seq[string] + + staticnodes* {. + desc: "Enode URL to directly connect with. Argument may be repeated." + name: "staticnode" .}: seq[string] + + whisper* {. + desc: "Enable the Whisper protocol." + defaultValue: false + name: "whisper" .}: bool + + whisperBridge* {. + desc: "Enable the Whisper protocol and bridge with Waku protocol." + defaultValue: false + name: "whisper-bridge" .}: bool + + lightNode* {. + desc: "Run as light node (no message relay).", + defaultValue: false + name: "light-node" .}: bool + + wakuTopicInterest* {. + desc: "Run as node with a topic-interest", + defaultValue: false + name: "waku-topic-interest" .}: bool + + wakuPow* {. + desc: "PoW requirement of Waku node.", + defaultValue: 0.002 + name: "waku-pow" .}: float64 + + nodekey* {. + desc: "P2P node private key as hex.", + # TODO: can the rng be passed in somehow via Load? + defaultValue: KeyPair.random(keys.newRng()[]) + name: "nodekey" .}: KeyPair + # TODO: Add nodekey file option + + bootnodeOnly* {. + desc: "Run only as discovery bootnode." + defaultValue: false + name: "bootnode-only" .}: bool + + rpc* {. + desc: "Enable Waku RPC server.", + defaultValue: false + name: "rpc" .}: bool + + rpcAddress* {. + desc: "Listening address of the RPC server.", + defaultValue: parseIpAddress("127.0.0.1") + name: "rpc-address" .}: IpAddress + + rpcPort* {. + desc: "Listening port of the RPC server.", + defaultValue: 8545 + name: "rpc-port" .}: uint16 + + metricsServer* {. + desc: "Enable the metrics server." + defaultValue: false + name: "metrics-server" .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server." + defaultValue: parseIpAddress("127.0.0.1") + name: "metrics-server-address" .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server." + defaultValue: 8008 + name: "metrics-server-port" .}: uint16 + + logMetrics* {. + desc: "Enable metrics logging." + defaultValue: false + name: "log-metrics" .}: bool + + logAccounting* {. + desc: "Enable peer accounting logging." 
+ defaultValue: false + name: "log-accounting" .}: bool + + # TODO: + # - discv5 + topic register + # - mailserver functionality + of genNodekey: + discard + +proc parseCmdArg*(T: type KeyPair, p: string): T = + try: + let privkey = PrivateKey.fromHex(string(p)).tryGet() + result = privkey.toKeyPair() + except CatchableError: + raise newException(ConfigurationError, "Invalid private key") + +proc completeCmdArg*(T: type KeyPair, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type IpAddress, p: string): T = + try: + result = parseIpAddress(p) + except CatchableError: + raise newException(ConfigurationError, "Invalid IP address") + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] diff --git a/waku/node/nim.cfg b/waku/node/nim.cfg new file mode 100644 index 0000000..3d41815 --- /dev/null +++ b/waku/node/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:"chronicles_runtime_filtering=on" +-d:nimDebugDlOpen + diff --git a/waku/node/quicksim.nim b/waku/node/quicksim.nim new file mode 100644 index 0000000..9de4cd9 --- /dev/null +++ b/waku/node/quicksim.nim @@ -0,0 +1,76 @@ +import + os, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand, + eth/common as eth_common, eth/keys, + ../protocol/waku_protocol, ./rpc/[hexstrings, rpc_types], + options as what # TODO: Huh? Redefinition? + +from os import DirSep +from strutils import rsplit +template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0] + +const sigWakuPath = &"{sourceDir}{DirSep}rpc{DirSep}wakucallsigs.nim" +createRpcSigs(RpcHttpClient, sigWakuPath) + +const topicAmount = 100 + +let + trafficNode = newRpcHttpClient() + lightNode = newRpcHttpClient() + lightNode2 = newRpcHttpClient() + +waitFor lightNode.connect("localhost", Port(8545), false) +waitFor lightNode2.connect("localhost", Port(8546), false) +waitFor trafficNode.connect("localhost", Port(8548), false) + +proc generateTopics(amount = topicAmount): seq[waku_protocol.Topic] = + var topic: waku_protocol.Topic + for i in 0..= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true + else: false + +template isHexChar(c: char): bool = + if c notin {'0'..'9'} and + c notin {'a'..'f'} and + c notin {'A'..'F'}: false + else: true + +func isValidHexQuantity*(value: string): bool = + if not value.hasHexHeader: + return false + # No leading zeros (but allow 0x0) + if value.len < 3 or (value.len > 3 and value[2] == '0'): return false + for i in 2 ..< value.len: + let c = value[i] + if not c.isHexChar: + return false + return true + +func isValidHexData*(value: string, header = true): bool = + if header and not value.hasHexHeader: + return false + # Must be even number of digits + if value.len mod 2 != 0: return false + # Leading zeros are allowed + for i in 2 ..< value.len: + let c = value[i] + if not c.isHexChar: + return false + return true + +template isValidHexData(value: string, hexLen: int, header = true): bool = + value.len == hexLen and value.isValidHexData(header) + +func isValidIdentifier*(value: string): bool = + # 32 bytes for Whisper ID, no 0x prefix + result = value.isValidHexData(64, false) + +func isValidPublicKey*(value: string): bool = + # 65 bytes for Public Key plus 1 byte for 0x prefix + result = value.isValidHexData(132) + +func isValidPrivateKey*(value: string): bool = + # 32 bytes for Private Key plus 1 byte for 0x prefix + result = value.isValidHexData(66) + +func isValidSymKey*(value: string): bool = + # 32 bytes for Private Key plus 1 byte for 0x prefix + result = 
value.isValidHexData(66) + +func isValidHash256*(value: string): bool = + # 32 bytes for Hash256 plus 1 byte for 0x prefix + result = value.isValidHexData(66) + +func isValidTopic*(value: string): bool = + # 4 bytes for Topic plus 1 byte for 0x prefix + result = value.isValidHexData(10) + +const + SInvalidData = "Invalid hex data format for Ethereum" + +proc validateHexData*(value: string) {.inline.} = + if unlikely(not value.isValidHexData): + raise newException(ValueError, SInvalidData & ": " & value) + +# Initialisation + +proc hexDataStr*(value: string): HexDataStr {.inline.} = + value.validateHexData + result = value.HexDataStr + +# Converters for use in RPC + +import json +from json_rpc/rpcserver import expect + +proc `%`*(value: HexStrings): JsonNode = + result = %(value.string) + +# Overloads to support expected representation of hex data + +proc `%`*(value: Hash256): JsonNode = + #result = %("0x" & $value) # More clean but no lowercase :( + result = %("0x" & value.data.toHex) + +proc `%`*(value: UInt256): JsonNode = + result = %("0x" & value.toString(16)) + +proc `%`*(value: PublicKey): JsonNode = + result = %("0x04" & $value) + +proc `%`*(value: PrivateKey): JsonNode = + result = %("0x" & $value) + +proc `%`*(value: SymKey): JsonNode = + result = %("0x" & value.toHex) + +proc `%`*(value: waku_protocol.Topic): JsonNode = + result = %("0x" & value.toHex) + +proc `%`*(value: seq[byte]): JsonNode = + if value.len > 0: + result = %("0x" & value.toHex) + else: + result = newJArray() + +# Helpers for the fromJson procs + +proc toPublicKey*(key: string): PublicKey {.inline.} = + result = PublicKey.fromHex(key[4 .. ^1]).tryGet() + +proc toPrivateKey*(key: string): PrivateKey {.inline.} = + result = PrivateKey.fromHex(key[2 .. ^1]).tryGet() + +proc toSymKey*(key: string): SymKey {.inline.} = + hexToByteArray(key[2 .. ^1], result) + +proc toTopic*(topic: string): waku_protocol.Topic {.inline.} = + hexToByteArray(topic[2 .. 
^1], result) + +# Marshalling from JSON to Nim types that includes format checking + +func invalidMsg(name: string): string = "When marshalling from JSON, parameter \"" & name & "\" is not valid" + +proc fromJson*(n: JsonNode, argName: string, result: var HexDataStr) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidHexData: + raise newException(ValueError, invalidMsg(argName) & " as Ethereum data \"" & hexStr & "\"") + result = hexStr.hexDataStr + +proc fromJson*(n: JsonNode, argName: string, result: var Identifier) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidIdentifier: + raise newException(ValueError, invalidMsg(argName) & " as a identifier \"" & hexStr & "\"") + result = hexStr.Identifier + +proc fromJson*(n: JsonNode, argName: string, result: var UInt256) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not (hexStr.len <= 66 and hexStr.isValidHexQuantity): + raise newException(ValueError, invalidMsg(argName) & " as a UInt256 \"" & hexStr & "\"") + result = readUintBE[256](hexToPaddedByteArray[32](hexStr)) + +proc fromJson*(n: JsonNode, argName: string, result: var PublicKey) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidPublicKey: + raise newException(ValueError, invalidMsg(argName) & " as a public key \"" & hexStr & "\"") + result = hexStr.toPublicKey + +proc fromJson*(n: JsonNode, argName: string, result: var PrivateKey) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidPrivateKey: + raise newException(ValueError, invalidMsg(argName) & " as a private key \"" & hexStr & "\"") + result = hexStr.toPrivateKey + +proc fromJson*(n: JsonNode, argName: string, result: var SymKey) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidSymKey: + raise newException(ValueError, invalidMsg(argName) & " as a symmetric key \"" & hexStr & "\"") + result = toSymKey(hexStr) + +proc fromJson*(n: JsonNode, argName: string, result: var waku_protocol.Topic) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidTopic: + raise newException(ValueError, invalidMsg(argName) & " as a topic \"" & hexStr & "\"") + result = toTopic(hexStr) + +# Following procs currently required only for testing, the `createRpcSigs` macro +# requires it as it will convert the JSON results back to the original Nim +# types, but it needs the `fromJson` calls for those specific Nim types to do so + +proc fromJson*(n: JsonNode, argName: string, result: var Hash256) = + n.kind.expect(JString, argName) + let hexStr = n.getStr() + if not hexStr.isValidHash256: + raise newException(ValueError, invalidMsg(argName) & " as a Hash256 \"" & hexStr & "\"") + hexToByteArray(hexStr, result.data) diff --git a/waku/node/rpc/key_storage.nim b/waku/node/rpc/key_storage.nim new file mode 100644 index 0000000..9f49c8d --- /dev/null +++ b/waku/node/rpc/key_storage.nim @@ -0,0 +1,22 @@ +# +# Nimbus +# (c) Copyright 2019 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +import tables, eth/keys, ../../../whisper/whisper_types + +type + KeyStorage* = ref object + asymKeys*: Table[string, KeyPair] + symKeys*: Table[string, SymKey] + + KeyGenerationError* = object of CatchableError + +proc newKeyStorage*(): KeyStorage = + new(result) + result.asymKeys = initTable[string, KeyPair]() + result.symKeys = initTable[string, SymKey]() diff 
--git a/waku/node/rpc/rpc_types.nim b/waku/node/rpc/rpc_types.nim new file mode 100644 index 0000000..cd1cda4 --- /dev/null +++ b/waku/node/rpc/rpc_types.nim @@ -0,0 +1,58 @@ +import + hexstrings, options, eth/keys, + ../../protocol/waku_protocol + +#[ + Notes: + * Some of the types suppose 'null' when there is no appropriate value. + To allow for this, you can use Option[T] or use refs so the JSON transform can convert to `JNull`. + * Parameter objects from users must have their data verified so will use EthAddressStr instead of EthAddres, for example + * Objects returned to the user can use native Waku types, where hexstrings provides converters to hex strings. + This is because returned arrays in JSON is + a) not an efficient use of space + b) not the format the user expects (for example addresses are expected to be hex strings prefixed by "0x") +]# + +type + WakuInfo* = object + # Returned to user + minPow*: float64 # Current minimum PoW requirement. + # TODO: may be uint32 + maxMessageSize*: uint64 # Current message size limit in bytes. + memory*: int # Memory size of the floating messages in bytes. + messages*: int # Number of floating messages. + + WakuFilterOptions* = object + # Parameter from user + symKeyID*: Option[Identifier] # ID of symmetric key for message decryption. + privateKeyID*: Option[Identifier] # ID of private (asymmetric) key for message decryption. + sig*: Option[PublicKey] # (Optional) Public key of the signature. + minPow*: Option[float64] # (Optional) Minimal PoW requirement for incoming messages. + topics*: Option[seq[waku_protocol.Topic]] # (Optional when asym key): Array of possible topics (or partial topics). + allowP2P*: Option[bool] # (Optional) Indicates if this filter allows processing of direct peer-to-peer messages. + + WakuFilterMessage* = object + # Returned to user + sig*: Option[PublicKey] # Public key who signed this message. + recipientPublicKey*: Option[PublicKey] # The recipients public key. + ttl*: uint64 # Time-to-live in seconds. + timestamp*: uint64 # Unix timestamp of the message generation. + topic*: waku_protocol.Topic # 4 Bytes: Message topic. + payload*: seq[byte] # Decrypted payload. + padding*: seq[byte] # (Optional) Padding (byte array of arbitrary length). + pow*: float64 # Proof of work value. + hash*: Hash # Hash of the enveloped message. + + WakuPostMessage* = object + # Parameter from user + symKeyID*: Option[Identifier] # ID of symmetric key for message encryption. + pubKey*: Option[PublicKey] # Public key for message encryption. + sig*: Option[Identifier] # (Optional) ID of the signing key. + ttl*: uint64 # Time-to-live in seconds. + topic*: Option[waku_protocol.Topic] # Message topic (mandatory when key is symmetric). + payload*: HexDataStr # Payload to be encrypted. + padding*: Option[HexDataStr] # (Optional) Padding (byte array of arbitrary length). + powTime*: float64 # Maximal time in seconds to be spent on proof of work. + powTarget*: float64 # Minimal PoW target required for this message. + # TODO: EnodeStr + targetPeer*: Option[string] # (Optional) Peer ID (for peer-to-peer message only). 
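As a rough usage sketch of how these parameter types fit together (illustrative only; `symKeyId` is assumed to be an `Identifier` previously returned by `waku_newSymKey`), a symmetric-key post could be built as:

  let post = WakuPostMessage(
    symKeyID: some(symKeyId),                        # ID of a previously stored symmetric key (assumed)
    topic: some(waku_protocol.Topic([byte 0x01, 0x02, 0x03, 0x04])),  # 4-byte topic, mandatory with a symmetric key
    payload: HexDataStr("0x48656c6c6f"),             # "Hello" as 0x-prefixed hex data
    ttl: 30,                                         # time-to-live in seconds
    powTime: 1.0,                                    # max seconds to spend on proof of work
    powTarget: 0.002)                                # minimal PoW target for this message

The remaining `Option` fields are left at their default `none`, matching the 'null' convention described in the notes above.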
diff --git a/waku/node/rpc/waku.nim b/waku/node/rpc/waku.nim new file mode 100644 index 0000000..84cd0b1 --- /dev/null +++ b/waku/node/rpc/waku.nim @@ -0,0 +1,365 @@ +import + json_rpc/rpcserver, tables, options, sequtils, + eth/[common, keys, p2p], + nimcrypto/[sysrand, hmac, sha2, pbkdf2], + rpc_types, hexstrings, key_storage, + ../../protocol/waku_protocol + +from stew/byteutils import hexToSeqByte, hexToByteArray + +# Blatant copy of Whisper RPC but for the Waku protocol + +proc setupWakuRPC*(node: EthereumNode, keys: KeyStorage, rpcsrv: RpcServer, + rng: ref HmacDrbgContext) = + + rpcsrv.rpc("waku_version") do() -> string: + ## Returns string of the current Waku protocol version. + result = wakuVersionStr + + rpcsrv.rpc("waku_info") do() -> WakuInfo: + ## Returns diagnostic information about the Waku node. + let config = node.protocolState(Waku).config + result = WakuInfo(minPow: config.powRequirement, + maxMessageSize: config.maxMsgSize, + memory: 0, + messages: 0) + + # TODO: uint32 instead of uint64 is OK here, but needs to be added in json_rpc + rpcsrv.rpc("waku_setMaxMessageSize") do(size: uint64) -> bool: + ## Sets the maximal message size allowed by this node. + ## Incoming and outgoing messages with a larger size will be rejected. + ## Waku message size can never exceed the limit imposed by the underlying + ## P2P protocol (10 Mb). + ## + ## size: Message size in bytes. + ## + ## Returns true on success and an error on failure. + result = node.setMaxMessageSize(size.uint32) + if not result: + raise newException(ValueError, "Invalid size") + + rpcsrv.rpc("waku_setMinPoW") do(pow: float) -> bool: + ## Sets the minimal PoW required by this node. + ## + ## pow: The new PoW requirement. + ## + ## Returns true on success and an error on failure. + # Note: `setPowRequirement` does not raise on failures of sending the update + # to the peers. Hence in theory this should not causes errors. + await node.setPowRequirement(pow) + result = true + + # TODO: change string in to ENodeStr with extra checks + rpcsrv.rpc("waku_markTrustedPeer") do(enode: string) -> bool: + ## Marks specific peer trusted, which will allow it to send historic + ## (expired) messages. + ## Note: This function is not adding new nodes, the node needs to exists as + ## a peer. + ## + ## enode: Enode of the trusted peer. + ## + ## Returns true on success and an error on failure. + # TODO: It will now require an enode://pubkey@ip:port uri + # could also accept only the pubkey (like geth)? + let peerNode = newNode(enode) + result = node.setPeerTrusted(peerNode.id) + if not result: + raise newException(ValueError, "Not a peer") + + rpcsrv.rpc("waku_newKeyPair") do() -> Identifier: + ## Generates a new public and private key pair for message decryption and + ## encryption. + ## + ## Returns key identifier on success and an error on failure. + result = generateRandomID(rng[]).Identifier + keys.asymKeys[result.string] = KeyPair.random(rng[]) + + rpcsrv.rpc("waku_addPrivateKey") do(key: PrivateKey) -> Identifier: + ## Stores the key pair, and returns its ID. + ## + ## key: Private key as hex bytes. + ## + ## Returns key identifier on success and an error on failure. + result = generateRandomID(rng[]).Identifier + + keys.asymKeys[result.string] = key.toKeyPair() + + rpcsrv.rpc("waku_deleteKeyPair") do(id: Identifier) -> bool: + ## Deletes the specifies key if it exists. + ## + ## id: Identifier of key pair + ## + ## Returns true on success and an error on failure. 
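+    # `Table.take` removes the entry if present and returns whether it existed;
+    # the popped key pair itself is not needed here.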
+ var unneeded: KeyPair + result = keys.asymKeys.take(id.string, unneeded) + if not result: + raise newException(ValueError, "Invalid key id") + + rpcsrv.rpc("waku_hasKeyPair") do(id: Identifier) -> bool: + ## Checks if the Waku node has a private key of a key pair matching the + ## given ID. + ## + ## id: Identifier of key pair + ## + ## Returns (true or false) on success and an error on failure. + result = keys.asymkeys.hasKey(id.string) + + rpcsrv.rpc("waku_getPublicKey") do(id: Identifier) -> PublicKey: + ## Returns the public key for identity ID. + ## + ## id: Identifier of key pair + ## + ## Returns public key on success and an error on failure. + # Note: key not found exception as error in case not existing + result = keys.asymkeys[id.string].pubkey + + rpcsrv.rpc("waku_getPrivateKey") do(id: Identifier) -> PrivateKey: + ## Returns the private key for identity ID. + ## + ## id: Identifier of key pair + ## + ## Returns private key on success and an error on failure. + # Note: key not found exception as error in case not existing + result = keys.asymkeys[id.string].seckey + + rpcsrv.rpc("waku_newSymKey") do() -> Identifier: + ## Generates a random symmetric key and stores it under an ID, which is then + ## returned. Can be used encrypting and decrypting messages where the key is + ## known to both parties. + ## + ## Returns key identifier on success and an error on failure. + result = generateRandomID(rng[]).Identifier + var key: SymKey + if randomBytes(key) != key.len: + raise newException(KeyGenerationError, "Failed generating key") + + keys.symKeys[result.string] = key + + + rpcsrv.rpc("waku_addSymKey") do(key: SymKey) -> Identifier: + ## Stores the key, and returns its ID. + ## + ## key: The raw key for symmetric encryption as hex bytes. + ## + ## Returns key identifier on success and an error on failure. + result = generateRandomID(rng[]).Identifier + + keys.symKeys[result.string] = key + + rpcsrv.rpc("waku_generateSymKeyFromPassword") do(password: string) -> Identifier: + ## Generates the key from password, stores it, and returns its ID. + ## + ## password: Password. + ## + ## Returns key identifier on success and an error on failure. + ## Warning: an empty string is used as salt because the shh RPC API does not + ## allow for passing a salt. A very good password is necessary (calculate + ## yourself what that means :)) + var ctx: HMAC[sha256] + var symKey: SymKey + if pbkdf2(ctx, password, "", 65356, symKey) != sizeof(SymKey): + raise newException(KeyGenerationError, "Failed generating key") + + result = generateRandomID(rng[]).Identifier + keys.symKeys[result.string] = symKey + + rpcsrv.rpc("waku_hasSymKey") do(id: Identifier) -> bool: + ## Returns true if there is a key associated with the name string. + ## Otherwise, returns false. + ## + ## id: Identifier of key. + ## + ## Returns (true or false) on success and an error on failure. + result = keys.symkeys.hasKey(id.string) + + rpcsrv.rpc("waku_getSymKey") do(id: Identifier) -> SymKey: + ## Returns the symmetric key associated with the given ID. + ## + ## id: Identifier of key. + ## + ## Returns Raw key on success and an error on failure. + # Note: key not found exception as error in case not existing + result = keys.symkeys[id.string] + + rpcsrv.rpc("waku_deleteSymKey") do(id: Identifier) -> bool: + ## Deletes the key associated with the name string if it exists. + ## + ## id: Identifier of key. + ## + ## Returns (true or false) on success and an error on failure. 
+ var unneeded: SymKey + result = keys.symKeys.take(id.string, unneeded) + if not result: + raise newException(ValueError, "Invalid key id") + + rpcsrv.rpc("waku_subscribe") do(id: string, + options: WakuFilterOptions) -> Identifier: + ## Creates and registers a new subscription to receive notifications for + ## inbound Waku messages. Returns the ID of the newly created + ## subscription. + ## + ## id: identifier of function call. In case of Waku must contain the + ## value "messages". + ## options: WakuFilterOptions + ## + ## Returns the subscription ID on success, the error on failure. + + # TODO: implement subscriptions, only for WS & IPC? + discard + + rpcsrv.rpc("waku_unsubscribe") do(id: Identifier) -> bool: + ## Cancels and removes an existing subscription. + ## + ## id: Subscription identifier + ## + ## Returns true on success, the error on failure + result = node.unsubscribeFilter(id.string) + if not result: + raise newException(ValueError, "Invalid filter id") + + proc validateOptions[T,U,V](asym: Option[T], sym: Option[U], topic: Option[V]) = + if (asym.isSome() and sym.isSome()) or (asym.isNone() and sym.isNone()): + raise newException(ValueError, + "Either privateKeyID/pubKey or symKeyID must be present") + if asym.isNone() and topic.isNone(): + raise newException(ValueError, "Topic mandatory with symmetric key") + + rpcsrv.rpc("waku_newMessageFilter") do(options: WakuFilterOptions) -> Identifier: + ## Create a new filter within the node. This filter can be used to poll for + ## new messages that match the set of criteria. + ## + ## options: WakuFilterOptions + ## + ## Returns filter identifier on success, error on failure + + # Check if either symKeyID or privateKeyID is present, and not both + # Check if there are Topics when symmetric key is used + validateOptions(options.privateKeyID, options.symKeyID, options.topics) + + var + src: Option[PublicKey] + privateKey: Option[PrivateKey] + symKey: Option[SymKey] + topics: seq[waku_protocol.Topic] + powReq: float64 + allowP2P: bool + + src = options.sig + + if options.privateKeyID.isSome(): + privateKey = some(keys.asymKeys[options.privateKeyID.get().string].seckey) + + if options.symKeyID.isSome(): + symKey= some(keys.symKeys[options.symKeyID.get().string]) + + if options.minPow.isSome(): + powReq = options.minPow.get() + + if options.topics.isSome(): + topics = options.topics.get() + + if options.allowP2P.isSome(): + allowP2P = options.allowP2P.get() + + let filter = initFilter(src, privateKey, symKey, topics, powReq, allowP2P) + result = node.subscribeFilter(filter).Identifier + + # TODO: Should we do this here "automatically" or separate it in another + # RPC call? Is there a use case for that? + # Same could be said about bloomfilter, except that there is a use case + # there to have a full node no matter what message filters. + # Could also be moved to waku_protocol.nim + let config = node.protocolState(Waku).config + if config.topics.isSome(): + try: + # TODO: an addTopics call would probably be more useful + let result = await node.setTopicInterest(config.topics.get().concat(filter.topics)) + if not result: + raise newException(ValueError, "Too many topics") + except CatchableError: + trace "setTopics error occured" + elif config.isLightNode: + try: + await node.setBloomFilter(node.filtersToBloom()) + except CatchableError: + trace "setBloomFilter error occured" + + rpcsrv.rpc("waku_deleteMessageFilter") do(id: Identifier) -> bool: + ## Uninstall a message filter in the node. 
+ ## + ## id: Filter identifier as returned when the filter was created. + ## + ## Returns true on success, error on failure. + result = node.unsubscribeFilter(id.string) + if not result: + raise newException(ValueError, "Invalid filter id") + + rpcsrv.rpc("waku_getFilterMessages") do(id: Identifier) -> seq[WakuFilterMessage]: + ## Retrieve messages that match the filter criteria and are received between + ## the last time this function was called and now. + ## + ## id: ID of filter that was created with `waku_newMessageFilter`. + ## + ## Returns array of messages on success and an error on failure. + let messages = node.getFilterMessages(id.string) + for msg in messages: + result.add WakuFilterMessage( + sig: msg.decoded.src, + recipientPublicKey: msg.dst, + ttl: msg.ttl, + topic: msg.topic, + timestamp: msg.timestamp, + payload: msg.decoded.payload, + # Note: waku_protocol padding is an Option as there is the + # possibility of 0 padding in case of custom padding. + padding: msg.decoded.padding.get(@[]), + pow: msg.pow, + hash: msg.hash) + + rpcsrv.rpc("waku_post") do(message: WakuPostMessage) -> bool: + ## Creates a Waku message and injects it into the network for + ## distribution. + ## + ## message: Waku message to post. + ## + ## Returns true on success and an error on failure. + + # Check if either symKeyID or pubKey is present, and not both + # Check if there is a Topic when symmetric key is used + validateOptions(message.pubKey, message.symKeyID, message.topic) + + var + sigPrivKey: Option[PrivateKey] + symKey: Option[SymKey] + topic: waku_protocol.Topic + padding: Option[seq[byte]] + targetPeer: Option[NodeId] + + if message.sig.isSome(): + sigPrivKey = some(keys.asymKeys[message.sig.get().string].seckey) + + if message.symKeyID.isSome(): + symKey = some(keys.symKeys[message.symKeyID.get().string]) + + # Note: If no topic it will be defaulted to 0x00000000 + if message.topic.isSome(): + topic = message.topic.get() + + if message.padding.isSome(): + padding = some(hexToSeqByte(message.padding.get().string)) + + if message.targetPeer.isSome(): + targetPeer = some(newNode(message.targetPeer.get()).id) + + result = node.postMessage(message.pubKey, + symKey, + sigPrivKey, + ttl = message.ttl.uint32, + topic = topic, + payload = hexToSeqByte(message.payload.string), + padding = padding, + powTime = message.powTime, + powTarget = message.powTarget, + targetPeer = targetPeer) + if not result: + raise newException(ValueError, "Message could not be posted") diff --git a/waku/node/rpc/wakucallsigs.nim b/waku/node/rpc/wakucallsigs.nim new file mode 100644 index 0000000..d49947e --- /dev/null +++ b/waku/node/rpc/wakucallsigs.nim @@ -0,0 +1,27 @@ +proc waku_version(): string +proc waku_info(): WakuInfo +proc waku_setMaxMessageSize(size: uint64): bool +proc waku_setMinPoW(pow: float): bool +proc waku_markTrustedPeer(enode: string): bool + +proc waku_newKeyPair(): Identifier +proc waku_addPrivateKey(key: string): Identifier +proc waku_deleteKeyPair(id: Identifier): bool +proc waku_hasKeyPair(id: Identifier): bool +proc waku_getPublicKey(id: Identifier): PublicKey +proc waku_getPrivateKey(id: Identifier): PrivateKey + +proc waku_newSymKey(): Identifier +proc waku_addSymKey(key: string): Identifier +proc waku_generateSymKeyFromPassword(password: string): Identifier +proc waku_hasSymKey(id: Identifier): bool +proc waku_getSymKey(id: Identifier): SymKey +proc waku_deleteSymKey(id: Identifier): bool + +proc waku_newMessageFilter(options: WakuFilterOptions): Identifier +proc 
waku_deleteMessageFilter(id: Identifier): bool +proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage] +proc waku_post(message: WakuPostMessage): bool + +proc wakusim_generateTraffic(amount: int): bool +proc wakusim_generateRandomTraffic(amount: int): bool diff --git a/waku/node/rpc/wakusim.nim b/waku/node/rpc/wakusim.nim new file mode 100644 index 0000000..03a4635 --- /dev/null +++ b/waku/node/rpc/wakusim.nim @@ -0,0 +1,31 @@ +import + json_rpc/rpcserver, stew/endians2, nimcrypto/sysrand, + eth/[p2p, async_utils], + ../../protocol/waku_protocol + +proc generateTraffic(node: EthereumNode, amount = 100) {.async.} = + let payload = @[byte 0] + for i in 0.. bool: + traceAsyncErrors node.generateTraffic(amount) + return true + + rpcsrv.rpc("wakusim_generateRandomTraffic") do(amount: int) -> bool: + traceAsyncErrors node.generateRandomTraffic(amount) + return true diff --git a/waku/node/start_network.nim b/waku/node/start_network.nim new file mode 100644 index 0000000..b40d903 --- /dev/null +++ b/waku/node/start_network.nim @@ -0,0 +1,204 @@ +import + options, strformat, os, osproc, net, confutils, strformat, chronicles, json, strutils, + eth/keys, eth/p2p/enode + +const + defaults ="--log-level:DEBUG --log-metrics --metrics-server --rpc" + wakuNodeBin = "build" / "wakunode1" + metricsDir = "metrics" + portOffset = 2 + +type + NodeType = enum + FullNode = "", + LightNode = "--light-node:on", + + Topology = enum + Star, + FullMesh, + DiscoveryBased # Whatever topology the discovery brings + + WakuNetworkConf* = object + topology* {. + desc: "Set the network topology." + defaultValue: Star + name: "topology" .}: Topology + + amount* {. + desc: "Amount of full nodes to be started." + defaultValue: 4 + name: "amount" .}: int + + testNodes* {. + desc: "Initialize light test nodes as part of network." + defaultValue: true + name: "test-nodes" .}: bool + + testNodePeers* {. + desc: "Amount of peers a test node should connect to." + defaultValue: 1 + name: "test-node-peers" .}: int + + NodeInfo* = object + cmd: string + master: bool + enode: string + shift: int + label: string + +proc initNodeCmd(nodeType: NodeType, shift: int, staticNodes: seq[string] = @[], + discovery = false, bootNodes: seq[string] = @[], topicInterest = false, + master = false, label: string): NodeInfo = + let + rng = keys.newRng() + keypair = KeyPair.random(rng[]) + address = Address(ip: parseIpAddress("127.0.0.1"), + udpPort: (30303 + shift).Port, tcpPort: (30303 + shift).Port) + enode = ENode(pubkey: keypair.pubkey, address: address) + + result.cmd = wakuNodeBin & " " & defaults & " " + result.cmd &= $nodeType & " " + result.cmd &= "--waku-topic-interest:" & $topicInterest & " " + result.cmd &= "--nodekey:" & $keypair.seckey & " " + result.cmd &= "--ports-shift:" & $shift & " " + if discovery: + result.cmd &= "--discovery:on" & " " + if bootNodes.len > 0: + for bootNode in bootNodes: + result.cmd &= "--bootnode:" & bootNode & " " + else: + result.cmd &= "--discovery:off" & " " + if staticNodes.len > 0: + for staticNode in staticNodes: + result.cmd &= "--staticnode:" & staticNode & " " + + result.master = master + result.enode = $enode + result.shift = shift + result.label = label + + debug "Node command created.", cmd=result.cmd + +proc starNetwork(amount: int): seq[NodeInfo] = + let masterNode = initNodeCmd(FullNode, portOffset, master = true, + label = "master node") + result.add(masterNode) + for i in 1.. 
0: setBootNodes(config.bootnodes) + elif config.fleet == prod: setBootNodes(StatusBootNodes) + elif config.fleet == staging: setBootNodes(StatusBootNodesStaging) + elif config.fleet == test : setBootNodes(StatusBootNodesTest) + else: @[] + + # Set-up node + var node = newEthereumNode(config.nodekey, address, NetworkId(1), clientId, + addAllCapabilities = false, bootstrapNodes = bootnodes, bindUdpPort = address.udpPort, bindTcpPort = address.tcpPort, rng = rng) + if not config.bootnodeOnly: + node.addCapability Waku # Always enable Waku protocol + var topicInterest: Option[seq[waku_protocol.Topic]] + var bloom: Option[Bloom] + if config.wakuTopicInterest: + var topics: seq[waku_protocol.Topic] + topicInterest = some(topics) + else: + bloom = some(fullBloom()) + let wakuConfig = WakuConfig(powRequirement: config.wakuPow, + bloom: bloom, + isLightNode: config.lightNode, + maxMsgSize: waku_protocol.defaultMaxMsgSize, + topics: topicInterest) + node.configureWaku(wakuConfig) + if config.whisper or config.whisperBridge: + node.addCapability Whisper + node.protocolState(Whisper).config.powRequirement = 0.002 + if config.whisperBridge: + node.shareMessageQueue() + + let connectedFut = node.connectToNetwork(not config.noListen, + config.discovery) + connectedFut.callback = proc(data: pointer) {.gcsafe.} = + {.gcsafe.}: + if connectedFut.failed: + fatal "connectToNetwork failed", msg = connectedFut.readError.msg + quit(1) + + if not config.bootnodeOnly: + # Optionally direct connect with a set of nodes + if config.staticnodes.len > 0: connectToNodes(node, config.staticnodes) + elif config.fleet == prod: connectToNodes(node, WhisperNodes) + elif config.fleet == staging: connectToNodes(node, WhisperNodesStaging) + elif config.fleet == test: connectToNodes(node, WhisperNodesTest) + + if config.rpc: + let ta = initTAddress(config.rpcAddress, + Port(config.rpcPort + config.portsShift)) + var rpcServer = newRpcHttpServer([ta]) + let keys = newKeyStorage() + setupWakuRPC(node, keys, rpcServer, rng) + setupWakuSimRPC(node, rpcServer) + rpcServer.start() + + + if config.logAccounting: + # https://github.com/nim-lang/Nim/issues/17369 + var logPeerAccounting: proc(udata: pointer) {.gcsafe, raises: [Defect].} + logPeerAccounting = proc(udata: pointer) = + {.gcsafe.}: + for peer in node.peerPool.peers: + let + sent = peer.state(Waku).accounting.sent + received = peer.state(Waku).accounting.received + id = peer.network.toEnode + info "Peer accounting", id, sent, received + peer.state(Waku).accounting = Accounting(sent: 0, received: 0) + + discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting) + discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting) + + if config.metricsServer: + let + address = config.metricsServerAddress + port = config.metricsServerPort + config.portsShift + info "Starting metrics HTTP server", address, port + startMetricsHttpServer($address, Port(port)) + + if config.logMetrics: + # https://github.com/nim-lang/Nim/issues/17369 + var logMetrics: proc(udata: pointer) {.gcsafe, raises: [Defect].} + logMetrics = proc(udata: pointer) = + {.gcsafe.}: + let + connectedPeers = rlpx_connected_peers + validEnvelopes = waku_protocol.envelopes_valid + droppedEnvelopes = waku_protocol.envelopes_dropped + + info "Node metrics", connectedPeers, validEnvelopes, droppedEnvelopes + discard setTimer(Moment.fromNow(2.seconds), logMetrics) + discard setTimer(Moment.fromNow(2.seconds), logMetrics) + + runForever() + +{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref 
IOError +when isMainModule: + let + rng = keys.newRng() + conf = WakuNodeConf.load() + + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + case conf.cmd + of genNodekey: + echo PrivateKey.random(rng[]) + of noCommand: + run(conf, rng) diff --git a/waku/protocol/README.md b/waku/protocol/README.md new file mode 100644 index 0000000..9d88dbe --- /dev/null +++ b/waku/protocol/README.md @@ -0,0 +1,3 @@ +# Waku v1 protocol + +This folder contains implementations of [Waku v1 protocols](https://specs.vac.dev/specs/waku/v1/waku-1.html). diff --git a/waku/protocol/waku_bridge.nim b/waku/protocol/waku_bridge.nim new file mode 100644 index 0000000..84cc174 --- /dev/null +++ b/waku/protocol/waku_bridge.nim @@ -0,0 +1,22 @@ +# +# Waku - Whisper Bridge +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +# + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + eth/p2p, + ../../whisper/whisper_protocol, + ./waku_protocol + +proc shareMessageQueue*(node: EthereumNode) = + node.protocolState(Waku).queue = node.protocolState(Whisper).queue diff --git a/waku/protocol/waku_mail.nim b/waku/protocol/waku_mail.nim new file mode 100644 index 0000000..e8023f2 --- /dev/null +++ b/waku/protocol/waku_mail.nim @@ -0,0 +1,91 @@ +# +# Waku Mail Client & Server +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +# + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + chronos, + eth/[p2p, async_utils], + ./waku_protocol + +const + requestCompleteTimeout = chronos.seconds(5) + +type + Cursor = seq[byte] + + MailRequest* = object + lower*: uint32 ## Unix timestamp; oldest requested envelope's creation time + upper*: uint32 ## Unix timestamp; newest requested envelope's creation time + bloom*: seq[byte] ## Bloom filter to apply on the envelopes + limit*: uint32 ## Maximum amount of envelopes to return + cursor*: Cursor ## Optional cursor + +proc requestMail*(node: EthereumNode, peerId: NodeId, request: MailRequest, + symKey: SymKey, requests = 10): Future[Option[Cursor]] {.async.} = + ## Send p2p mail request and check request complete. + ## If the result is none, an error occurred. If the result is a non-empty cursor, + ## more envelopes are available. + # TODO: Perhaps don't go the recursive route or could use the actual response + # proc to implement this (via a handler) and store the necessary data in the + # WakuPeer object. + # TODO: Several requestMail calls in parallel can create issues with handling + # the wrong response to a request. Can additionally check the requestId but + # that would only solve it halfway. Better to use the requestResponse mechanism. + + # TODO: move this check out of requestMail? + let peer = node.getPeer(peerId, Waku) + if not peer.isSome(): + error "Invalid peer" + return result + elif not peer.get().state(Waku).trusted: + return result + + var writer = initRlpWriter() + writer.append(request) + let payload = writer.finish() + let data = encode(node.rng[], Payload(payload: payload, symKey: some(symKey))) + if not data.isSome(): + error "Encoding of payload failed" + return result + + # TODO: should this envelope be valid in terms of ttl, PoW, etc.?
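+  # Note: the RLP-encoded MailRequest is encrypted with the shared symmetric
+  # key and wrapped in a zero-TTL, unsealed envelope that is handed directly
+  # to the trusted mail server peer via the p2pRequest packet below; it never
+  # enters the regular relay queue.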
+ let env = Envelope(expiry:0, ttl: 0, data: data.get(), nonce: 0) + # Send the request + traceAsyncErrors peer.get().p2pRequest(env) + + # Wait for the Request Complete packet + var f: Future[Waku.p2pRequestComplete] = peer.get().nextMsg(Waku.p2pRequestComplete) + if await f.withTimeout(requestCompleteTimeout): + let response = f.read() + # TODO: I guess the idea is to check requestId (Hash) also? + let requests = requests - 1 + # If there is cursor data, do another request + if response.cursor.len > 0 and requests > 0: + var newRequest = request + newRequest.cursor = response.cursor + return await requestMail(node, peerId, newRequest, symKey, requests) + else: + return some(response.cursor) + else: + error "p2pRequestComplete timeout" + return result + +proc p2pRequestHandler(peer: Peer, envelope: Envelope) = + # Mail server p2p request implementation + discard + +proc enableMailServer*(node: EthereumNode) = + # TODO: This could become part of an init call for an actual `MailServer` + # object. + node.registerP2PRequestHandler(p2pRequestHandler) diff --git a/waku/protocol/waku_protocol.nim b/waku/protocol/waku_protocol.nim new file mode 100644 index 0000000..d015099 --- /dev/null +++ b/waku/protocol/waku_protocol.nim @@ -0,0 +1,694 @@ +# +# Waku +# (c) Copyright 2018-2021 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +# + +## Waku +## ******* +## +## Waku is a fork of Whisper. +## +## Waku is a gossip protocol that synchronizes a set of messages across nodes +## with attention given to sender and recipient anonymitiy. Messages are +## categorized by a topic and stay alive in the network based on a time-to-live +## measured in seconds. Spam prevention is based on proof-of-work, where large +## or long-lived messages must spend more work. +## +## Implementation should be according to Waku specification defined here: +## https://github.com/vacp2p/specs/blob/master/waku/waku.md +## +## Example usage +## ---------- +## First an `EthereumNode` needs to be created, either with all capabilities set +## or with specifically the Waku capability set. +## The latter can be done like this: +## +## .. code-block::nim +## var node = newEthereumNode(keypair, address, netId, nil, +## addAllCapabilities = false) +## node.addCapability Waku +## +## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done. +## However, they only make real sense after ``connectToNetwork`` was started. As +## else there will be no peers to send and receive messages from. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + options, tables, times, chronos, chronicles, metrics, + eth/[keys, async_utils, p2p], + ../../whisper/whisper_types, + eth/trie/trie_defs + +export + whisper_types + +logScope: + topics = "waku" + +const + defaultQueueCapacity = 2048 + wakuVersion* = 1 ## Waku version. + wakuVersionStr* = $wakuVersion ## Waku version. + defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. + defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max + ## message size. This can never be larger than the maximum RLPx message size. + messageInterval* = chronos.milliseconds(300) ## Interval at which messages are + ## send to peers, in ms. + pruneInterval* = chronos.milliseconds(1000) ## Interval at which message + ## queue is pruned, in ms. 
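+  # Maximum number of topics a peer may announce via topic-interest; peers
+  # exceeding this are disconnected as useless, and local `setTopicInterest`
+  # calls above this limit return false.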
+ topicInterestMax = 10000 + +type + WakuConfig* = object + powRequirement*: float64 + bloom*: Option[Bloom] + isLightNode*: bool + maxMsgSize*: uint32 + confirmationsEnabled*: bool + rateLimits*: Option[RateLimits] + topics*: Option[seq[whisper_types.Topic]] + + Accounting* = ref object + sent*: uint + received*: uint + + WakuPeer = ref object + initialized: bool # when successfully completed the handshake + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + trusted*: bool + topics*: Option[seq[whisper_types.Topic]] + received: HashSet[Hash] + accounting*: Accounting + + P2PRequestHandler* = proc(peer: Peer, envelope: Envelope) + {.gcsafe, raises: [Defect].} + + EnvReceivedHandler* = proc(envelope: Envelope) {.gcsafe, raises: [Defect].} + + WakuNetwork = ref object + queue*: ref Queue + filters*: Filters + config*: WakuConfig + p2pRequestHandler*: P2PRequestHandler + envReceivedHandler*: EnvReceivedHandler + + RateLimits* = object + # TODO: uint or specifically uint32? + limitIp*: uint + limitPeerId*: uint + limitTopic*: uint + + StatusOptions* = object + powRequirement*: Option[(float64)] + bloomFilter*: Option[Bloom] + lightNode*: Option[bool] + confirmationsEnabled*: Option[bool] + rateLimits*: Option[RateLimits] + topicInterest*: Option[seq[whisper_types.Topic]] + + KeyKind* = enum + powRequirementKey, + bloomFilterKey, + lightNodeKey, + confirmationsEnabledKey, + rateLimitsKey, + topicInterestKey + +template countSomeFields*(x: StatusOptions): int = + var count = 0 + for f in fields(x): + if f.isSome(): + inc count + count + +proc append*(rlpWriter: var RlpWriter, value: StatusOptions) = + var list = initRlpList(countSomeFields(value)) + if value.powRequirement.isSome(): + list.append((powRequirementKey, cast[uint64](value.powRequirement.get()))) + if value.bloomFilter.isSome(): + list.append((bloomFilterKey, @(value.bloomFilter.get()))) + if value.lightNode.isSome(): + list.append((lightNodeKey, value.lightNode.get())) + if value.confirmationsEnabled.isSome(): + list.append((confirmationsEnabledKey, value.confirmationsEnabled.get())) + if value.rateLimits.isSome(): + list.append((rateLimitsKey, value.rateLimits.get())) + if value.topicInterest.isSome(): + list.append((topicInterestKey, value.topicInterest.get())) + + let bytes = list.finish() + + try: + rlpWriter.append(rlpFromBytes(bytes)) + except RlpError as e: + # bytes is valid rlp just created here, rlpFromBytes should thus never fail + raiseAssert e.msg + +proc read*(rlp: var Rlp, T: typedesc[StatusOptions]): + T {.raises: [RlpError, Defect].}= + if not rlp.isList(): + raise newException(RlpTypeMismatch, + "List expected, but the source RLP is not a list.") + + let sz = rlp.listLen() + # We already know that we are working with a list + doAssert rlp.enterList() + for i in 0 ..< sz: + rlp.tryEnterList() + + var k: KeyKind + try: + k = rlp.read(KeyKind) + except RlpTypeMismatch: + # skip unknown keys and their value + rlp.skipElem() + rlp.skipElem() + continue + + case k + of powRequirementKey: + let pow = rlp.read(uint64) + result.powRequirement = some(cast[float64](pow)) + of bloomFilterKey: + let bloom = rlp.read(seq[byte]) + if bloom.len != bloomSize: + raise newException(RlpTypeMismatch, "Bloomfilter size mismatch") + var bloomFilter: Bloom + bloomFilter.bytesCopy(bloom) + result.bloomFilter = some(bloomFilter) + of lightNodeKey: + result.lightNode = some(rlp.read(bool)) + of confirmationsEnabledKey: + result.confirmationsEnabled = some(rlp.read(bool)) + of rateLimitsKey: + result.rateLimits = 
some(rlp.read(RateLimits)) + of topicInterestKey: + result.topicInterest = some(rlp.read(seq[whisper_types.Topic])) + +proc allowed*(msg: Message, config: WakuConfig): bool = + # Check max msg size, already happens in RLPx but there is a specific waku + # max msg size which should always be < RLPx max msg size + if msg.size > config.maxMsgSize: + envelopes_dropped.inc(labelValues = ["too_large"]) + warn "Message size too large", size = msg.size + return false + + if msg.pow < config.powRequirement: + envelopes_dropped.inc(labelValues = ["low_pow"]) + warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement + return false + + if config.topics.isSome(): + if msg.env.topic notin config.topics.get(): + envelopes_dropped.inc(labelValues = ["topic_mismatch"]) + warn "Message topic does not match Waku topic list" + return false + else: + if config.bloom.isSome() and not bloomFilterMatch(config.bloom.get(), msg.bloom): + envelopes_dropped.inc(labelValues = ["bloom_filter_mismatch"]) + warn "Message does not match node bloom filter" + return false + + return true + +proc run(peer: Peer) {.gcsafe, async, raises: [Defect].} +proc run(node: EthereumNode, network: WakuNetwork) + {.gcsafe, async, raises: [Defect].} + +proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = + new(network.queue) + network.queue[] = initQueue(defaultQueueCapacity) + network.filters = initTable[string, Filter]() + network.config.bloom = some(fullBloom()) + network.config.powRequirement = defaultMinPow + network.config.isLightNode = false + # RateLimits and confirmations are not yet implemented so we set confirmations + # to false and we don't pass RateLimits at all. + network.config.confirmationsEnabled = false + network.config.rateLimits = none(RateLimits) + network.config.maxMsgSize = defaultMaxMsgSize + network.config.topics = none(seq[whisper_types.Topic]) + asyncSpawn node.run(network) + +p2pProtocol Waku(version = wakuVersion, + rlpxName = "waku", + peerState = WakuPeer, + networkState = WakuNetwork): + + onPeerConnected do (peer: Peer): + trace "onPeerConnected Waku" + let + wakuNet = peer.networkState + wakuPeer = peer.state + + let options = StatusOptions( + powRequirement: some(wakuNet.config.powRequirement), + bloomFilter: wakuNet.config.bloom, + lightNode: some(wakuNet.config.isLightNode), + confirmationsEnabled: some(wakuNet.config.confirmationsEnabled), + rateLimits: wakuNet.config.rateLimits, + topicInterest: wakuNet.config.topics) + + let m = await peer.status(options, + timeout = chronos.milliseconds(5000)) + + wakuPeer.powRequirement = m.options.powRequirement.get(defaultMinPow) + wakuPeer.bloom = m.options.bloomFilter.get(fullBloom()) + + wakuPeer.isLightNode = m.options.lightNode.get(false) + if wakuPeer.isLightNode and wakuNet.config.isLightNode: + # No sense in connecting two light nodes so we disconnect + raise newException(UselessPeerError, "Two light nodes connected") + + wakuPeer.topics = m.options.topicInterest + if wakuPeer.topics.isSome(): + if wakuPeer.topics.get().len > topicInterestMax: + raise newException(UselessPeerError, "Topic-interest is too large") + if wakuNet.config.topics.isSome(): + raise newException(UselessPeerError, + "Two Waku nodes with topic-interest connected") + + wakuPeer.received.init() + wakuPeer.trusted = false + wakuPeer.accounting = Accounting(sent: 0, received: 0) + wakuPeer.initialized = true + + # No timer based queue processing for a light node. 
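+    # `peer.run()` is the per-peer send loop: every `messageInterval` it
+    # flushes envelopes from the shared queue that match this peer's PoW,
+    # bloom filter or topic-interest settings.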
+ if not wakuNet.config.isLightNode: + asyncSpawn peer.run() + + debug "Waku peer initialized", peer + + handshake: + proc status(peer: Peer, options: StatusOptions) + + proc messages(peer: Peer, envelopes: openarray[Envelope]) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding messages" + return + + for envelope in envelopes: + # check if expired or in future, or ttl not 0 + if not envelope.valid(): + warn "Expired or future timed envelope", peer + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + peer.state.accounting.received += 1 + + let msg = initMessage(envelope) + if not msg.allowed(peer.networkState.config): + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + # This peer send this message thus should not receive it again. + # If this peer has the message in the `received` set already, this means + # it was either already received here from this peer or send to this peer. + # Either way it will be in our queue already (and the peer should know + # this) and this peer is sending duplicates. + # Note: geth does not check if a peer has send a message to them before + # broadcasting this message. This too is seen here as a duplicate message + # (see above comment). If we want to seperate these cases (e.g. when peer + # rating), then we have to add a "peer.state.send" HashSet. + # Note: it could also be a race between the arrival of a message send by + # this node to a peer and that same message arriving from that peer (after + # it was received from another peer) here. + if peer.state.received.containsOrIncl(msg.hash): + envelopes_dropped.inc(labelValues = ["duplicate"]) + trace "Peer sending duplicate messages", peer, hash = $msg.hash + # await peer.disconnect(SubprotocolReason) + continue + + # This can still be a duplicate message, but from another peer than + # the peer who send the message. + if peer.networkState.queue[].add(msg): + # notify filters of this message + peer.networkState.filters.notify(msg) + # trigger handler on received envelope, if registered + if not peer.networkState.envReceivedHandler.isNil(): + peer.networkState.envReceivedHandler(envelope) + + nextID 22 + + proc statusOptions(peer: Peer, options: StatusOptions) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding statusOptions" + return + + if options.topicInterest.isSome(): + peer.state.topics = options.topicInterest + elif options.bloomFilter.isSome(): + peer.state.bloom = options.bloomFilter.get() + peer.state.topics = none(seq[whisper_types.Topic]) + + if options.powRequirement.isSome(): + peer.state.powRequirement = options.powRequirement.get() + + if options.lightNode.isSome(): + peer.state.isLightNode = options.lightNode.get() + + nextID 126 + + proc p2pRequest(peer: Peer, envelope: Envelope) = + if not peer.networkState.p2pRequestHandler.isNil(): + peer.networkState.p2pRequestHandler(peer, envelope) + + proc p2pMessage(peer: Peer, envelopes: openarray[Envelope]) = + if peer.state.trusted: + # when trusted we can bypass any checks on envelope + for envelope in envelopes: + let msg = Message(env: envelope, isP2P: true) + peer.networkState.filters.notify(msg) + + # Following message IDs are not part of EIP-627, but are added and used by + # the Status application, we ignore them for now. 
+ nextID 11 + proc batchAcknowledged(peer: Peer) = discard + proc messageResponse(peer: Peer) = discard + + nextID 123 + requestResponse: + proc p2pSyncRequest(peer: Peer) = discard + proc p2pSyncResponse(peer: Peer) = discard + + + proc p2pRequestComplete(peer: Peer, requestId: Hash, lastEnvelopeHash: Hash, + cursor: seq[byte]) = discard + # TODO: + # In the current specification the parameters are not wrapped in a regular + # envelope as is done for the P2P Request packet. If we could alter this in + # the spec it would be a cleaner separation between Waku and Mail server / + # client. + # Also, if a requestResponse block is used, a reqestId will automatically + # be added by the protocol DSL. + # However the requestResponse block in combination with p2pRequest cannot be + # used due to the unfortunate fact that the packet IDs are not consecutive, + # and nextID is not recognized in between these. The nextID behaviour could + # be fixed, however it would be cleaner if the specification could be + # changed to have these IDs to be consecutive. + +# 'Runner' calls --------------------------------------------------------------- + +proc processQueue(peer: Peer) {.raises: [Defect].} = + # Send to peer all valid and previously not send envelopes in the queue. + var + envelopes: seq[Envelope] = @[] + wakuPeer = peer.state(Waku) + wakuNet = peer.networkState(Waku) + + for message in wakuNet.queue.items: + if wakuPeer.received.contains(message.hash): + # trace "message was already send to peer", hash = $message.hash, peer + continue + + if message.pow < wakuPeer.powRequirement: + trace "Message PoW too low for peer", pow = message.pow, + powReq = wakuPeer.powRequirement + continue + + if wakuPeer.topics.isSome(): + if message.env.topic notin wakuPeer.topics.get(): + trace "Message does not match topics list" + continue + else: + if not bloomFilterMatch(wakuPeer.bloom, message.bloom): + trace "Message does not match peer bloom filter" + continue + + trace "Adding envelope" + envelopes.add(message.env) + wakuPeer.accounting.sent += 1 + wakuPeer.received.incl(message.hash) + + if envelopes.len() > 0: + trace "Sending envelopes", amount=envelopes.len + # Ignore failure of sending messages, this could occur when the connection + # gets dropped + traceAsyncErrors peer.messages(envelopes) + +proc run(peer: Peer) {.async, raises: [Defect].} = + while peer.connectionState notin {Disconnecting, Disconnected}: + peer.processQueue() + await sleepAsync(messageInterval) + +proc pruneReceived(node: EthereumNode) = + if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ... + var wakuNet = node.protocolState(Waku) + + for peer in node.protocolPeers(Waku): + if not peer.initialized: + continue + + # NOTE: Perhaps alter the queue prune call to keep track of a HashSet + # of pruned messages (as these should be smaller), and diff this with + # the received sets. + peer.received = intersection(peer.received, wakuNet.queue.itemHashes) + +proc run(node: EthereumNode, network: WakuNetwork) {.async, raises: [Defect].} = + while true: + # prune message queue every second + # TTL unit is in seconds, so this should be sufficient? 
+ network.queue[].prune() + # pruning the received sets is not necessary for correct workings + # but simply from keeping the sets growing indefinitely + node.pruneReceived() + await sleepAsync(pruneInterval) + +# Private EthereumNode calls --------------------------------------------------- + +proc sendP2PMessage(node: EthereumNode, peerId: NodeId, + envelopes: openarray[Envelope]): bool = + for peer in node.peers(Waku): + if peer.remote.id == peerId: + let f = peer.p2pMessage(envelopes) + # Can't make p2pMessage not raise so this is the "best" option I can think + # of instead of using asyncSpawn and still keeping the call not async. + f.callback = proc(data: pointer) {.gcsafe, raises: [Defect].} = + if f.failed: + warn "P2PMessage send failed", msg = f.readError.msg + + return true + +proc queueMessage(node: EthereumNode, msg: Message): bool = + + var wakuNet = node.protocolState(Waku) + # We have to do the same checks here as in the messages proc not to leak + # any information that the message originates from this node. + if not msg.allowed(wakuNet.config): + return false + + trace "Adding message to queue", hash = $msg.hash + if wakuNet.queue[].add(msg): + # Also notify our own filters of the message we are sending, + # e.g. msg from local Dapp to Dapp + wakuNet.filters.notify(msg) + + return true + +# Public EthereumNode calls ---------------------------------------------------- + +proc postEncoded*(node: EthereumNode, ttl: uint32, + topic: whisper_types.Topic, encodedPayload: seq[byte], + powTime = 1'f, + powTarget = defaultMinPow, + targetPeer = none[NodeId]()): bool = + ## Post a message from pre-encoded payload on the message queue. + ## This will be processed at the next `messageInterval`. + ## The encodedPayload must be encoded according to RFC 26/WAKU-PAYLOAD + ## at https://rfc.vac.dev/spec/26/ + + var env = Envelope(expiry:epochTime().uint32 + ttl, + ttl: ttl, topic: topic, data: encodedPayload, nonce: 0) + + # Allow lightnode to post only direct p2p messages + if targetPeer.isSome(): + return node.sendP2PMessage(targetPeer.get(), [env]) + else: + # non direct p2p message can not have ttl of 0 + if env.ttl == 0: + return false + var msg = initMessage(env, powCalc = false) + # XXX: make this non blocking or not? + # In its current blocking state, it could be noticed by a peer that no + # messages are send for a while, and thus that mining PoW is done, and + # that next messages contains a message originated from this peer + # zah: It would be hard to execute this in a background thread at the + # moment. We'll need a way to send custom "tasks" to the async message + # loop (e.g. AD2 support for AsyncChannels). + if not msg.sealEnvelope(powTime, powTarget): + return false + + # need to check expiry after mining PoW + if not msg.env.valid(): + return false + + result = node.queueMessage(msg) + + # Allows light nodes to post via untrusted messages packet. + # Queue gets processed immediatly as the node sends only its own messages, + # so the privacy ship has already sailed anyhow. + # TODO: + # - Could be still a concern in terms of efficiency, if multiple messages + # need to be send. + # - For Waku Mode, the checks in processQueue are rather useless as the + # idea is to connect only to 1 node? Also refactor in that case. 
+ if node.protocolState(Waku).config.isLightNode: + for peer in node.peers(Waku): + peer.processQueue() + +proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](), + symKey = none[SymKey](), src = none[PrivateKey](), + ttl: uint32, topic: whisper_types.Topic, payload: seq[byte], + padding = none[seq[byte]](), powTime = 1'f, + powTarget = defaultMinPow, + targetPeer = none[NodeId]()): bool = + ## Post a message on the message queue which will be processed at the + ## next `messageInterval`. + ## + ## NOTE: This call allows a post without encryption. If encryption is + ## mandatory it should be enforced a layer up + let payload = encode(node.rng[], Payload( + payload: payload, src: src, dst: pubKey, symKey: symKey, padding: padding)) + if payload.isSome(): + return node.postEncoded(ttl, topic, payload.get(), powTime, powTarget, targetPeer) + else: + error "Encoding of payload failed" + return false + +proc subscribeFilter*(node: EthereumNode, filter: Filter, + handler:FilterMsgHandler = nil): string = + ## Initiate a filter for incoming/outgoing messages. Messages can be + ## retrieved with the `getFilterMessages` call or with a provided + ## `FilterMsgHandler`. + ## + ## NOTE: This call allows for a filter without decryption. If encryption is + ## mandatory it should be enforced a layer up. + return subscribeFilter( + node.rng[], node.protocolState(Waku).filters, filter, handler) + +proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool = + ## Remove a previously subscribed filter. + var filter: Filter + return node.protocolState(Waku).filters.take(filterId, filter) + +proc getFilterMessages*(node: EthereumNode, filterId: string): + seq[ReceivedMessage] {.raises: [KeyError, Defect].} = + ## Get all the messages currently in the filter queue. This will reset the + ## filter message queue. + return node.protocolState(Waku).filters.getFilterMessages(filterId) + +proc filtersToBloom*(node: EthereumNode): Bloom = + ## Returns the bloom filter of all topics of all subscribed filters. + return node.protocolState(Waku).filters.toBloom() + +proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} = + ## Sets the PoW requirement for this node, will also send + ## this new PoW requirement to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old PoW for some time? + node.protocolState(Waku).config.powRequirement = powReq + var futures: seq[Future[void]] = @[] + let list = StatusOptions(powRequirement: some(powReq)) + for peer in node.peers(Waku): + futures.add(peer.statusOptions(list)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} = + ## Sets the bloom filter for this node, will also send + ## this new bloom filter to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old bloom filter for some time? 
+ node.protocolState(Waku).config.bloom = some(bloom) + # reset topics + node.protocolState(Waku).config.topics = none(seq[whisper_types.Topic]) + + var futures: seq[Future[void]] = @[] + let list = StatusOptions(bloomFilter: some(bloom)) + for peer in node.peers(Waku): + futures.add(peer.statusOptions(list)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setTopicInterest*(node: EthereumNode, topics: seq[whisper_types.Topic]): + Future[bool] {.async.} = + if topics.len > topicInterestMax: + return false + + node.protocolState(Waku).config.topics = some(topics) + + var futures: seq[Future[void]] = @[] + let list = StatusOptions(topicInterest: some(topics)) + for peer in node.peers(Waku): + futures.add(peer.statusOptions(list)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + + return true + +proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool = + ## Set the maximum allowed message size. + ## Can not be set higher than ``defaultMaxMsgSize``. + if size > defaultMaxMsgSize: + warn "size > defaultMaxMsgSize" + return false + node.protocolState(Waku).config.maxMsgSize = size + return true + +proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool = + ## Set a connected peer as trusted. + for peer in node.peers(Waku): + if peer.remote.id == peerId: + peer.state(Waku).trusted = true + return true + +proc setLightNode*(node: EthereumNode, isLightNode: bool) {.async.} = + ## Set this node as a Waku light node. + node.protocolState(Waku).config.isLightNode = isLightNode +# TODO: Add starting/stopping of `processQueue` loop depending on value of isLightNode. + var futures: seq[Future[void]] = @[] + let list = StatusOptions(lightNode: some(isLightNode)) + for peer in node.peers(Waku): + futures.add(peer.statusOptions(list)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc configureWaku*(node: EthereumNode, config: WakuConfig) = + ## Apply a Waku configuration. + ## + ## NOTE: Should be run before connection is made with peers as some + ## of the settings are only communicated at peer handshake. + node.protocolState(Waku).config = config + +proc registerP2PRequestHandler*(node: EthereumNode, + customHandler: P2PRequestHandler) = + node.protocolState(Waku).p2pRequestHandler = customHandler + +proc registerEnvReceivedHandler*(node: EthereumNode, + customHandler: EnvReceivedHandler) = + node.protocolState(Waku).envReceivedHandler = customHandler + +proc resetMessageQueue*(node: EthereumNode) = + ## Full reset of the message queue. + ## + ## NOTE: Not something that should be run in normal circumstances. + node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity) diff --git a/waku/whisper/whisper_protocol.nim b/waku/whisper/whisper_protocol.nim new file mode 100644 index 0000000..8e6c311 --- /dev/null +++ b/waku/whisper/whisper_protocol.nim @@ -0,0 +1,481 @@ +# nim-eth - Whisper +# Copyright (c) 2018-2021 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +## Whisper +## ******* +## +## Whisper is a gossip protocol that synchronizes a set of messages across nodes +## with attention given to sender and recipient anonymitiy. 
Messages are +## categorized by a topic and stay alive in the network based on a time-to-live +## measured in seconds. Spam prevention is based on proof-of-work, where large +## or long-lived messages must spend more work. +## +## Example usage +## ---------- +## First an `EthereumNode` needs to be created, either with all capabilities set +## or with specifically the Whisper capability set. +## The latter can be done like this: +## +## .. code-block::nim +## var node = newEthereumNode(keypair, address, netId, nil, +## addAllCapabilities = false) +## node.addCapability Whisper +## +## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done. +## However, they only make real sense after ``connectToNetwork`` was started. As +## else there will be no peers to send and receive messages from. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[options, tables, times], + chronos, chronicles, metrics, + eth/[keys, async_utils, p2p], + ./whisper_types + +export + whisper_types + +logScope: + topics = "whisper" + +const + defaultQueueCapacity = 2048 + whisperVersion* = 6 ## Whisper version. + whisperVersionStr* = $whisperVersion ## Whisper version. + defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. + defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max + ## message size. This can never be larger than the maximum RLPx message size. + messageInterval* = chronos.milliseconds(300) ## Interval at which messages are + ## send to peers, in ms. + pruneInterval* = chronos.milliseconds(1000) ## Interval at which message + ## queue is pruned, in ms. + +type + WhisperConfig* = object + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + maxMsgSize*: uint32 + + WhisperPeer = ref object + initialized: bool # when successfully completed the handshake + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + trusted*: bool + received: HashSet[Hash] + + WhisperNetwork = ref object + queue*: ref Queue + filters*: Filters + config*: WhisperConfig + +proc allowed*(msg: Message, config: WhisperConfig): bool = + # Check max msg size, already happens in RLPx but there is a specific shh + # max msg size which should always be < RLPx max msg size + if msg.size > config.maxMsgSize: + envelopes_dropped.inc(labelValues = ["too_large"]) + warn "Message size too large", size = msg.size + return false + + if msg.pow < config.powRequirement: + envelopes_dropped.inc(labelValues = ["low_pow"]) + warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement + return false + + if not bloomFilterMatch(config.bloom, msg.bloom): + envelopes_dropped.inc(labelValues = ["bloom_filter_mismatch"]) + warn "Message does not match node bloom filter" + return false + + return true + +proc run(peer: Peer) {.gcsafe, async, raises: [Defect].} +proc run(node: EthereumNode, network: WhisperNetwork) + {.gcsafe, async, raises: [Defect].} + +proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} = + new(network.queue) + network.queue[] = initQueue(defaultQueueCapacity) + network.filters = initTable[string, Filter]() + network.config.bloom = fullBloom() + network.config.powRequirement = defaultMinPow + network.config.isLightNode = false + network.config.maxMsgSize = defaultMaxMsgSize + asyncSpawn node.run(network) + +p2pProtocol Whisper(version = whisperVersion, + rlpxName = "shh", + peerState = WhisperPeer, + networkState = WhisperNetwork): + + onPeerConnected do (peer: Peer): 
+ trace "onPeerConnected Whisper" + let + whisperNet = peer.networkState + whisperPeer = peer.state + + let m = await peer.status(whisperVersion, + cast[uint64](whisperNet.config.powRequirement), + @(whisperNet.config.bloom), + whisperNet.config.isLightNode, + timeout = chronos.milliseconds(5000)) + + if m.protocolVersion == whisperVersion: + debug "Whisper peer", peer, whisperVersion + else: + raise newException(UselessPeerError, "Incompatible Whisper version") + + whisperPeer.powRequirement = cast[float64](m.powConverted) + + if m.bloom.len > 0: + if m.bloom.len != bloomSize: + raise newException(UselessPeerError, "Bloomfilter size mismatch") + else: + whisperPeer.bloom.bytesCopy(m.bloom) + else: + # If no bloom filter is send we allow all + whisperPeer.bloom = fullBloom() + + whisperPeer.isLightNode = m.isLightNode + if whisperPeer.isLightNode and whisperNet.config.isLightNode: + # No sense in connecting two light nodes so we disconnect + raise newException(UselessPeerError, "Two light nodes connected") + + whisperPeer.received.init() + whisperPeer.trusted = false + whisperPeer.initialized = true + + if not whisperNet.config.isLightNode: + asyncSpawn peer.run() + + debug "Whisper peer initialized", peer + + handshake: + proc status(peer: Peer, + protocolVersion: uint, + powConverted: uint64, + bloom: seq[byte], + isLightNode: bool) + + proc messages(peer: Peer, envelopes: openarray[Envelope]) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding messages" + return + + for envelope in envelopes: + # check if expired or in future, or ttl not 0 + if not envelope.valid(): + warn "Expired or future timed envelope", peer + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + let msg = initMessage(envelope) + if not msg.allowed(peer.networkState.config): + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + # This peer send this message thus should not receive it again. + # If this peer has the message in the `received` set already, this means + # it was either already received here from this peer or send to this peer. + # Either way it will be in our queue already (and the peer should know + # this) and this peer is sending duplicates. + # Note: geth does not check if a peer has send a message to them before + # broadcasting this message. This too is seen here as a duplicate message + # (see above comment). If we want to seperate these cases (e.g. when peer + # rating), then we have to add a "peer.state.send" HashSet. + # Note: it could also be a race between the arrival of a message send by + # this node to a peer and that same message arriving from that peer (after + # it was received from another peer) here. + if peer.state.received.containsOrIncl(msg.hash): + envelopes_dropped.inc(labelValues = ["duplicate"]) + trace "Peer sending duplicate messages", peer, hash = $msg.hash + # await peer.disconnect(SubprotocolReason) + continue + + # This can still be a duplicate message, but from another peer than + # the peer who send the message. 
+ if peer.networkState.queue[].add(msg): + # notify filters of this message + peer.networkState.filters.notify(msg) + + proc powRequirement(peer: Peer, value: uint64) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding powRequirement" + return + + peer.state.powRequirement = cast[float64](value) + + proc bloomFilterExchange(peer: Peer, bloom: openArray[byte]) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding bloomFilterExchange" + return + + if bloom.len == bloomSize: + peer.state.bloom.bytesCopy(bloom) + + nextID 126 + + proc p2pRequest(peer: Peer, envelope: Envelope) = + # TODO: here we would have to allow to insert some specific implementation + # such as e.g. Whisper Mail Server + discard + + proc p2pMessage(peer: Peer, envelope: Envelope) = + if peer.state.trusted: + # when trusted we can bypass any checks on envelope + let msg = Message(env: envelope, isP2P: true) + peer.networkState.filters.notify(msg) + + # Following message IDs are not part of EIP-627, but are added and used by + # the Status application, we ignore them for now. + nextID 11 + proc batchAcknowledged(peer: Peer) = discard + proc messageResponse(peer: Peer) = discard + + nextID 123 + requestResponse: + proc p2pSyncRequest(peer: Peer) = discard + proc p2pSyncResponse(peer: Peer) = discard + + proc p2pRequestComplete(peer: Peer) = discard + +# 'Runner' calls --------------------------------------------------------------- + +proc processQueue(peer: Peer) = + # Send to peer all valid and previously not send envelopes in the queue. + var + envelopes: seq[Envelope] = @[] + whisperPeer = peer.state(Whisper) + whisperNet = peer.networkState(Whisper) + + for message in whisperNet.queue.items: + if whisperPeer.received.contains(message.hash): + # trace "message was already send to peer", hash = $message.hash, peer + continue + + if message.pow < whisperPeer.powRequirement: + trace "Message PoW too low for peer", pow = message.pow, + powReq = whisperPeer.powRequirement + continue + + if not bloomFilterMatch(whisperPeer.bloom, message.bloom): + trace "Message does not match peer bloom filter" + continue + + trace "Adding envelope" + envelopes.add(message.env) + whisperPeer.received.incl(message.hash) + + if envelopes.len() > 0: + trace "Sending envelopes", amount=envelopes.len + # Ignore failure of sending messages, this could occur when the connection + # gets dropped + traceAsyncErrors peer.messages(envelopes) + +proc run(peer: Peer) {.async.} = + while peer.connectionState notin {Disconnecting, Disconnected}: + peer.processQueue() + await sleepAsync(messageInterval) + +proc pruneReceived(node: EthereumNode) = + if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ... + var whisperNet = node.protocolState(Whisper) + + for peer in node.protocolPeers(Whisper): + if not peer.initialized: + continue + + # NOTE: Perhaps alter the queue prune call to keep track of a HashSet + # of pruned messages (as these should be smaller), and diff this with + # the received sets. + peer.received = intersection(peer.received, whisperNet.queue.itemHashes) + +proc run(node: EthereumNode, network: WhisperNetwork) {.async.} = + while true: + # prune message queue every second + # TTL unit is in seconds, so this should be sufficient? 
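+    # With `pruneInterval` at 1000 ms and envelope expiry expressed in whole
+    # seconds, an expired envelope should linger in the queue for roughly one
+    # second at most before `prune` removes it.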
+ network.queue[].prune() + # pruning the received sets is not necessary for correct workings + # but simply from keeping the sets growing indefinitely + node.pruneReceived() + await sleepAsync(pruneInterval) + +# Private EthereumNode calls --------------------------------------------------- + +proc sendP2PMessage(node: EthereumNode, peerId: NodeId, env: Envelope): bool = + for peer in node.peers(Whisper): + if peer.remote.id == peerId: + let f = peer.p2pMessage(env) + # Can't make p2pMessage not raise so this is the "best" option I can think + # of instead of using asyncSpawn and still keeping the call not async. + f.callback = proc(data: pointer) {.gcsafe, raises: [Defect].} = + if f.failed: + warn "P2PMessage send failed", msg = f.readError.msg + + return true + +proc queueMessage(node: EthereumNode, msg: Message): bool = + + var whisperNet = node.protocolState(Whisper) + # We have to do the same checks here as in the messages proc not to leak + # any information that the message originates from this node. + if not msg.allowed(whisperNet.config): + return false + + trace "Adding message to queue", hash = $msg.hash + if whisperNet.queue[].add(msg): + # Also notify our own filters of the message we are sending, + # e.g. msg from local Dapp to Dapp + whisperNet.filters.notify(msg) + + return true + +# Public EthereumNode calls ---------------------------------------------------- + +proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](), + symKey = none[SymKey](), src = none[PrivateKey](), + ttl: uint32, topic: whisper_types.Topic, payload: seq[byte], + padding = none[seq[byte]](), powTime = 1'f, + powTarget = defaultMinPow, + targetPeer = none[NodeId]()): bool = + ## Post a message on the message queue which will be processed at the + ## next `messageInterval`. + ## + ## NOTE: This call allows a post without encryption. If encryption is + ## mandatory it should be enforced a layer up + let payload = encode(node.rng[], Payload( + payload: payload, src: src, dst: pubKey, symKey: symKey, padding: padding)) + if payload.isSome(): + var env = Envelope(expiry:epochTime().uint32 + ttl, + ttl: ttl, topic: topic, data: payload.get(), nonce: 0) + + # Allow lightnode to post only direct p2p messages + if targetPeer.isSome(): + return node.sendP2PMessage(targetPeer.get(), env) + elif not node.protocolState(Whisper).config.isLightNode: + # non direct p2p message can not have ttl of 0 + if env.ttl == 0: + return false + var msg = initMessage(env, powCalc = false) + # XXX: make this non blocking or not? + # In its current blocking state, it could be noticed by a peer that no + # messages are send for a while, and thus that mining PoW is done, and + # that next messages contains a message originated from this peer + # zah: It would be hard to execute this in a background thread at the + # moment. We'll need a way to send custom "tasks" to the async message + # loop (e.g. AD2 support for AsyncChannels). + if not msg.sealEnvelope(powTime, powTarget): + return false + + # need to check expiry after mining PoW + if not msg.env.valid(): + return false + + return node.queueMessage(msg) + else: + warn "Light node not allowed to post messages" + return false + else: + error "Encoding of payload failed" + return false + +proc subscribeFilter*(node: EthereumNode, filter: Filter, + handler:FilterMsgHandler = nil): string = + ## Initiate a filter for incoming/outgoing messages. Messages can be + ## retrieved with the `getFilterMessages` call or with a provided + ## `FilterMsgHandler`. 
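+  ##
+  ## A minimal sketch, assuming ``mySymKey`` and ``myTopic`` are an
+  ## application-provided ``SymKey`` and ``Topic``:
+  ##
+  ## .. code-block::nim
+  ##   let filterId = node.subscribeFilter(
+  ##     initFilter(symKey = some(mySymKey), topics = @[myTopic]),
+  ##     proc(msg: ReceivedMessage) {.gcsafe, raises: [Defect].} =
+  ##       echo msg.decoded.payload)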
+ ## + ## NOTE: This call allows for a filter without decryption. If encryption is + ## mandatory it should be enforced a layer up. + return subscribeFilter( + node.rng[], node.protocolState(Whisper).filters, filter, handler) + +proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool = + ## Remove a previously subscribed filter. + var filter: Filter + return node.protocolState(Whisper).filters.take(filterId, filter) + +proc getFilterMessages*(node: EthereumNode, filterId: string): + seq[ReceivedMessage] {.raises: [KeyError, Defect].} = + ## Get all the messages currently in the filter queue. This will reset the + ## filter message queue. + return node.protocolState(Whisper).filters.getFilterMessages(filterId) + +proc filtersToBloom*(node: EthereumNode): Bloom = + ## Returns the bloom filter of all topics of all subscribed filters. + return node.protocolState(Whisper).filters.toBloom() + +proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} = + ## Sets the PoW requirement for this node, will also send + ## this new PoW requirement to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old PoW for some time? + node.protocolState(Whisper).config.powRequirement = powReq + var futures: seq[Future[void]] = @[] + for peer in node.peers(Whisper): + futures.add(peer.powRequirement(cast[uint64](powReq))) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} = + ## Sets the bloom filter for this node, will also send + ## this new bloom filter to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old bloom filter for some time? + node.protocolState(Whisper).config.bloom = bloom + var futures: seq[Future[void]] = @[] + for peer in node.peers(Whisper): + futures.add(peer.bloomFilterExchange(@bloom)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool = + ## Set the maximum allowed message size. + ## Can not be set higher than ``defaultMaxMsgSize``. + if size > defaultMaxMsgSize: + warn "size > defaultMaxMsgSize" + return false + node.protocolState(Whisper).config.maxMsgSize = size + return true + +proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool = + ## Set a connected peer as trusted. + for peer in node.peers(Whisper): + if peer.remote.id == peerId: + peer.state(Whisper).trusted = true + return true + +proc setLightNode*(node: EthereumNode, isLightNode: bool) = + ## Set this node as a Whisper light node. + ## + ## NOTE: Should be run before connection is made with peers as this + ## setting is only communicated at peer handshake. + node.protocolState(Whisper).config.isLightNode = isLightNode + +proc configureWhisper*(node: EthereumNode, config: WhisperConfig) = + ## Apply a Whisper configuration. + ## + ## NOTE: Should be run before connection is made with peers as some + ## of the settings are only communicated at peer handshake. + node.protocolState(Whisper).config = config + +proc resetMessageQueue*(node: EthereumNode) = + ## Full reset of the message queue. + ## + ## NOTE: Not something that should be run in normal circumstances. 
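+  # Re-initialising the queue also clears `itemHashes`, so envelopes that
+  # were seen before will be accepted into the queue again if peers resend
+  # them.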
+ node.protocolState(Whisper).queue[] = initQueue(defaultQueueCapacity) diff --git a/waku/whisper/whisper_types.nim b/waku/whisper/whisper_types.nim new file mode 100644 index 0000000..b5e6d10 --- /dev/null +++ b/waku/whisper/whisper_types.nim @@ -0,0 +1,674 @@ +# nim-eth - Whisper +# Copyright (c) 2018-2021 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[algorithm, bitops, math, options, tables, times, hashes], + chronicles, stew/[byteutils, endians2], metrics, bearssl/rand, + nimcrypto/[bcmode, hash, keccak, rijndael], + eth/[keys, rlp, p2p], eth/p2p/ecies + +when chronicles.enabledLogLevel == LogLevel.TRACE: + import std/strutils + +logScope: + topics = "whisper_types" + +declarePublicCounter envelopes_valid, + "Received & posted valid envelopes" +declarePublicCounter envelopes_dropped, + "Dropped envelopes", labels = ["reason"] + +const + flagsLen = 1 ## payload flags field length, bytes + gcmIVLen = 12 ## Length of IV (seed) used for AES + gcmTagLen = 16 ## Length of tag used to authenticate AES-GCM-encrypted message + padMaxLen = 256 ## payload will be padded to multiples of this by default + signatureBits = 0b100'u8 ## payload flags signature mask + bloomSize* = 512 div 8 + defaultFilterQueueCapacity = 64 + +type + Hash* = MDigest[256] + SymKey* = array[256 div 8, byte] ## AES256 key. + Topic* = array[4, byte] ## 4 bytes that can be used to filter messages on. + Bloom* = array[bloomSize, byte] ## A bloom filter that can be used to identify + ## a number of topics that a peer is interested in. + # XXX: nim-eth-bloom has really quirky API and fixed + # bloom size. + # stint is massive overkill / poor fit - a bloom filter is an array of bits, + # not a number + + Payload* = object + ## Payload is what goes in the data field of the Envelope. + + src*: Option[PrivateKey] ## Optional key used for signing message + dst*: Option[PublicKey] ## Optional key used for asymmetric encryption + symKey*: Option[SymKey] ## Optional key used for symmetric encryption + payload*: seq[byte] ## Application data / message contents + padding*: Option[seq[byte]] ## Padding - if unset, will automatically pad up to + ## nearest maxPadLen-byte boundary + DecodedPayload* = object + ## The decoded payload of a received message. 
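+    ## Produced by ``decode``: ``src`` is only set when the message was
+    ## signed and ``padding`` only when padding bytes were present.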
+ + src*: Option[PublicKey] ## If the message was signed, this is the public key + ## of the source + payload*: seq[byte] ## Application data / message contents + padding*: Option[seq[byte]] ## Message padding + + Envelope* = object + ## What goes on the wire in the whisper protocol - a payload and some + ## book-keeping + # Don't touch field order, there's lots of macro magic that depends on it + expiry*: uint32 ## Unix timestamp when message expires + ttl*: uint32 ## Time-to-live, seconds - message was created at (expiry - ttl) + topic*: Topic + data*: seq[byte] ## Payload, as given by user + nonce*: uint64 ## Nonce used for proof-of-work calculation + + Message* = object + ## An Envelope with a few cached properties + + env*: Envelope + hash*: Hash ## Hash, as calculated for proof-of-work + size*: uint32 ## RLP-encoded size of message + pow*: float64 ## Calculated proof-of-work + bloom*: Bloom ## Filter sent to direct peers for topic-based filtering + isP2P*: bool + + ReceivedMessage* = object + ## A received message that matched a filter and was possible to decrypt. + ## Contains the decoded payload and additional information. + decoded*: DecodedPayload + timestamp*: uint32 + ttl*: uint32 + topic*: Topic + pow*: float64 + hash*: Hash + dst*: Option[PublicKey] + + Queue* = object + ## Bounded message repository + ## + ## Whisper uses proof-of-work to judge the usefulness of a message staying + ## in the "cloud" - messages with low proof-of-work will be removed to make + ## room for those with higher pow, even if they haven't expired yet. + ## Larger messages and those with high time-to-live will require more pow. + items*: seq[Message] ## Sorted by proof-of-work + itemHashes*: HashSet[Hash] ## For easy duplication checking + # XXX: itemHashes is added for easy message duplication checking and for + # easy pruning of the peer received message sets. It does have an impact on + # adding and pruning of items however. + # Need to give it some more thought and check where most time is lost in + # typical cases, perhaps we are better of with one hash table (lose PoW + # sorting however), or perhaps there is a simpler solution... + + capacity*: int ## Max messages to keep. 
\ + ## XXX: really big messages can cause excessive mem usage when using msg \ + ## count + + FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, raises: [Defect].} + + Filter* = object + src*: Option[PublicKey] + privateKey*: Option[PrivateKey] + symKey*: Option[SymKey] + topics*: seq[Topic] + powReq*: float64 + allowP2P*: bool + + bloom: Bloom # Cached bloom filter of all topics of filter + handler: FilterMsgHandler + queue: seq[ReceivedMessage] + + Filters* = Table[string, Filter] + +# Utilities -------------------------------------------------------------------- + +proc leadingZeroBits(hash: MDigest): int = + ## Number of most significant zero bits before the first one + for h in hash.data: + static: doAssert sizeof(h) == 1 + if h == 0: + result += 8 + else: + result += countLeadingZeroBits(h) + break + +proc calcPow*(size, ttl: uint64, hash: Hash): float64 = + ## Whisper proof-of-work is defined as the best bit of a hash divided by + ## encoded size and time-to-live, such that large and long-lived messages get + ## penalized + + let bits = leadingZeroBits(hash) + return pow(2.0, bits.float64) / (size.float64 * ttl.float64) + +proc topicBloom*(topic: Topic): Bloom = + ## Whisper uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit + ## indexes into the bloom are created using the first 3 bytes of the topic and + ## complementing each byte with an extra bit from the last topic byte + for i in 0..<3: + var idx = uint16(topic[i]) + if (topic[3] and byte(1 shl i)) != 0: # fetch the 9'th bit from the last byte + idx = idx + 256 + + doAssert idx <= 511 + result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16)) + +proc generateRandomID*(rng: var HmacDrbgContext): string = + var bytes: array[256 div 8, byte] + hmacDrbgGenerate(rng, bytes) + toHex(bytes) + +proc `or`(a, b: Bloom): Bloom = + for i in 0..= 256*256*256: + notice "Payload exceeds max length", len = self.payload.len + return + + # length of the payload length field :) + let payloadLenLen = + if self.payload.len >= 256*256: 3'u8 + elif self.payload.len >= 256: 2'u8 + else: 1'u8 + + let signatureLen = + if self.src.isSome(): keys.RawSignatureSize + else: 0 + + # useful data length + let dataLen = flagsLen + payloadLenLen.int + self.payload.len + signatureLen + + let padLen = + if self.padding.isSome(): self.padding.get().len + # is there a reason why 256 bytes are padded when the dataLen is 256? + else: padMaxLen - (dataLen mod padMaxLen) + + # buffer space that we need to allocate + let totalLen = dataLen + padLen + + var plain = newSeqOfCap[byte](totalLen) + + let signatureFlag = + if self.src.isSome(): signatureBits + else: 0'u8 + + # byte 0: flags with payload length length and presence of signature + plain.add payloadLenLen or signatureFlag + + # next, length of payload - little endian (who comes up with this stuff? why + # can't the world just settle on one endian?) + let payloadLenLE = self.payload.len.uint32.toBytesLE + + # No, I have no love for nim closed ranges - such a mess to remember the extra + # < or risk off-by-ones when working with lengths.. + plain.add payloadLenLE[0.. pos + keys.RawSignatureSize: + res.padding = some(plain[pos .. ^(keys.RawSignatureSize+1)]) + else: + if plain.len > pos: + res.padding = some(plain[pos .. 
^1]) + + return some(res) + +# Envelopes -------------------------------------------------------------------- + +proc valid*(self: Envelope, now = epochTime()): bool = + if self.expiry.float64 < now: # expired + envelopes_dropped.inc(labelValues = ["expired"]) + return false + if self.ttl <= 0: # this would invalidate pow calculation + envelopes_dropped.inc(labelValues = ["expired"]) + return false + + let created = self.expiry - self.ttl + if created.float64 > (now + 2.0): # created in the future + envelopes_dropped.inc(labelValues = ["future_timestamp"]) + return false + + return true + +proc len(self: Envelope): int = 20 + self.data.len + +proc toShortRlp*(self: Envelope): seq[byte] = + ## RLP-encoded message without nonce is used during proof-of-work calculations + rlp.encodeList(self.expiry, self.ttl, self.topic, self.data) + +proc toRlp(self: Envelope): seq[byte] = + ## What gets sent out over the wire includes the nonce + rlp.encode(self) + +proc minePow*(self: Envelope, seconds: float, bestBitTarget: int = 0): (uint64, Hash) = + ## For the given envelope, spend millis milliseconds to find the + ## best proof-of-work and return the nonce + let bytes = self.toShortRlp() + + var ctx: keccak256 + ctx.init() + ctx.update(bytes) + + var bestBit: int = 0 + + let mineEnd = epochTime() + seconds + + var i: uint64 + while epochTime() < mineEnd or bestBit == 0: # At least one round + var tmp = ctx # copy hash calculated so far - we'll reuse that for each iter + tmp.update(i.toBytesBE()) + # XXX:a random nonce here would not leak number of iters + let hash = tmp.finish() + let zeroBits = leadingZeroBits(hash) + if zeroBits > bestBit: # XXX: could also compare hashes as numbers instead + bestBit = zeroBits + result = (i, hash) + if bestBitTarget > 0 and bestBit >= bestBitTarget: + break + + i.inc + +proc calcPowHash*(self: Envelope): Hash = + ## Calculate the message hash, as done during mining - this can be used to + ## verify proof-of-work + + let bytes = self.toShortRlp() + + var ctx: keccak256 + ctx.init() + ctx.update(bytes) + ctx.update(self.nonce.toBytesBE()) + return ctx.finish() + +# Messages --------------------------------------------------------------------- + +proc cmpPow(a, b: Message): int = + ## Biggest pow first, lowest at the end (for easy popping) + if a.pow < b.pow: 1 + elif a.pow == b.pow: 0 + else: -1 + +proc initMessage*(env: Envelope, powCalc = true): Message = + result.env = env + result.size = env.toRlp().len().uint32 # XXX: calc len without creating RLP + result.bloom = topicBloom(env.topic) + if powCalc: + result.hash = env.calcPowHash() + result.pow = calcPow(result.env.len.uint32, result.env.ttl, result.hash) + trace "Message PoW", pow = result.pow.formatFloat(ffScientific) + +proc hash*(hash: Hash): hashes.Hash = hashes.hash(hash.data) + +# NOTE: Hashing and leading zeroes calculation is now the same between geth, +# parity and this implementation. +# However, there is still a difference in the size calculation. +# See also here: https://github.com/ethereum/go-ethereum/pull/19753 +# This implementation is not conform EIP-627 as we do not use the size of the +# RLP-encoded envelope, but the size of the envelope object itself. +# This is done to be able to correctly calculate the bestBitTarget. +# Other options would be: +# - work directly with powTarget in minePow, but this requires recalculation of +# rlp size + calcPow +# - Use worst case size of envelope nonce +# - Mine PoW for x interval, calcPow of best result, if target not met .. 
repeat +proc sealEnvelope*(msg: var Message, powTime: float, powTarget: float): bool = + let size = msg.env.len + if powTarget > 0: + let x = powTarget * size.float * msg.env.ttl.float + var bestBitTarget: int + if x <= 1: # log() would return negative numbers or 0 + bestBitTarget = 1 + else: + bestBitTarget = ceil(log(x, 2)).int + (msg.env.nonce, msg.hash) = msg.env.minePow(powTime, bestBitTarget) + else: + # If no target is set, we are certain of executed powTime + msg.env.expiry += powTime.uint32 + (msg.env.nonce, msg.hash) = msg.env.minePow(powTime) + + msg.pow = calcPow(size.uint32, msg.env.ttl, msg.hash) + trace "Message PoW", pow = msg.pow + if msg.pow < powTarget: + return false + + return true + +# Queues ----------------------------------------------------------------------- + +proc initQueue*(capacity: int): Queue = + result.items = newSeqOfCap[Message](capacity) + result.capacity = capacity + result.itemHashes.init() + +proc prune*(self: var Queue) = + ## Remove items that are past their expiry time + let now = epochTime().uint32 + + # keepIf code + pruning of hashset + var pos = 0 + for i in 0 ..< len(self.items): + if self.items[i].env.expiry > now: + if pos != i: + shallowCopy(self.items[pos], self.items[i]) + inc(pos) + else: self.itemHashes.excl(self.items[i].hash) + setLen(self.items, pos) + +proc add*(self: var Queue, msg: Message): bool = + ## Add a message to the queue. + ## If we're at capacity, we will be removing, in order: + ## * expired messages + ## * lowest proof-of-work message - this may be `msg` itself! + + # check for duplicate before pruning + if self.itemHashes.contains(msg.hash): + envelopes_dropped.inc(labelValues = ["benign_duplicate"]) + return false + else: + envelopes_valid.inc() + if self.items.len >= self.capacity: + self.prune() # Only prune if needed + + if self.items.len >= self.capacity: + # Still no room - go by proof-of-work quantity + let last = self.items[^1] + + if last.pow > msg.pow or + (last.pow == msg.pow and last.env.expiry > msg.env.expiry): + # The new message has less pow or will expire earlier - drop it + envelopes_dropped.inc(labelValues = ["full_queue_new"]) + return false + + self.items.del(self.items.len() - 1) + self.itemHashes.excl(last.hash) + envelopes_dropped.inc(labelValues = ["full_queue_old"]) + + self.itemHashes.incl(msg.hash) + self.items.insert(msg, self.items.lowerBound(msg, cmpPow)) + return true + +# Filters ---------------------------------------------------------------------- +proc initFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](), + symKey = none[SymKey](), topics: seq[Topic] = @[], + powReq = 0.0, allowP2P = false): Filter = + # Zero topics will give an empty bloom filter which is fine as this bloom + # filter is only used to `or` with existing/other bloom filters. Not to do + # matching. + Filter(src: src, privateKey: privateKey, symKey: symKey, topics: topics, + powReq: powReq, allowP2P: allowP2P, bloom: toBloom(topics)) + +proc subscribeFilter*( + rng: var HmacDrbgContext, filters: var Filters, filter: Filter, + handler: FilterMsgHandler = nil): string = + # NOTE: Should we allow a filter without a key? Encryption is mandatory in v6? + # Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence. 
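+  # Filter ids are 32 random bytes, hex-encoded by `generateRandomID`, and
+  # are only used as the key into the `Filters` table.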
+ let id = generateRandomID(rng) + var filter = filter + if handler.isNil(): + filter.queue = newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) + else: + filter.handler = handler + + filters[id] = filter + debug "Filter added", filter = id + return id + +proc notify*(filters: var Filters, msg: Message) {.gcsafe.} = + var decoded: Option[DecodedPayload] + var keyHash: Hash + var dst: Option[PublicKey] + + for filter in filters.mvalues: + if not filter.allowP2P and msg.isP2P: + continue + + # if message is direct p2p PoW doesn't matter + if msg.pow < filter.powReq and not msg.isP2P: + continue + + if filter.topics.len > 0: + if msg.env.topic notin filter.topics: + continue + + # Decode, if already decoded previously check if hash of key matches + if decoded.isNone(): + decoded = decode(msg.env.data, dst = filter.privateKey, + symKey = filter.symKey) + if decoded.isNone(): + continue + if filter.privateKey.isSome(): + keyHash = keccak256.digest(filter.privateKey.get().toRaw()) + # TODO: Get rid of the hash and just use pubkey to compare? + dst = some(toPublicKey(filter.privateKey.get())) + elif filter.symKey.isSome(): + keyHash = keccak256.digest(filter.symKey.get()) + # else: + # NOTE: In this case the message was not encrypted + else: + if filter.privateKey.isSome(): + if keyHash != keccak256.digest(filter.privateKey.get().toRaw()): + continue + elif filter.symKey.isSome(): + if keyHash != keccak256.digest(filter.symKey.get()): + continue + # else: + # NOTE: In this case the message was not encrypted + + # When decoding is done we can check the src (signature) + if filter.src.isSome(): + let src: Option[PublicKey] = decoded.get().src + if not src.isSome(): + continue + elif src.get() != filter.src.get(): + continue + + let receivedMsg = ReceivedMessage(decoded: decoded.get(), + timestamp: msg.env.expiry - msg.env.ttl, + ttl: msg.env.ttl, + topic: msg.env.topic, + pow: msg.pow, + hash: msg.hash, + dst: dst) + # Either run callback or add to queue + if filter.handler.isNil(): + filter.queue.insert(receivedMsg) + else: + filter.handler(receivedMsg) + +proc getFilterMessages*(filters: var Filters, filterId: string): + seq[ReceivedMessage] {.raises: [KeyError, Defect].} = + result = @[] + if filters.contains(filterId): + if filters[filterId].handler.isNil(): + shallowCopy(result, filters[filterId].queue) + filters[filterId].queue = + newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) + +proc toBloom*(filters: Filters): Bloom = + for filter in filters.values: + if filter.topics.len > 0: + result = result or filter.bloom diff --git a/waku/whisper/whispernodes.nim b/waku/whisper/whispernodes.nim new file mode 100644 index 0000000..3b726db --- /dev/null +++ b/waku/whisper/whispernodes.nim @@ -0,0 +1,67 @@ +const + # Whisper nodes taken from: + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].whisper[] + "\","' -r + WhisperNodes* = [ + "enode://b957e51f41e4abab8382e1ea7229e88c6e18f34672694c6eae389eac22dab8655622bbd4a08192c321416b9becffaab11c8e2b7a5d0813b922aa128b82990dab@47.75.222.178:443", + "enode://66ba15600cda86009689354c3a77bdf1a97f4f4fb3ab50ffe34dbc904fac561040496828397be18d9744c75881ffc6ac53729ddbd2cdbdadc5f45c400e2622f7@178.128.141.87:443", + "enode://182ed5d658d1a1a4382c9e9f7c9e5d8d9fec9db4c71ae346b9e23e1a589116aeffb3342299bdd00e0ab98dbf804f7b2d8ae564ed18da9f45650b444aed79d509@34.68.132.118:443", + "enode://8bebe73ddf7cf09e77602c7d04c93a73f455b51f24ae0d572917a4792f1dec0bb4c562759b8830cc3615a658d38c1a4a38597a1d7ae3ba35111479fc42d65dec@47.75.85.212:443", + 
"enode://4ea35352702027984a13274f241a56a47854a7fd4b3ba674a596cff917d3c825506431cf149f9f2312a293bb7c2b1cca55db742027090916d01529fe0729643b@134.209.136.79:443", + "enode://fbeddac99d396b91d59f2c63a3cb5fc7e0f8a9f7ce6fe5f2eed5e787a0154161b7173a6a73124a4275ef338b8966dc70a611e9ae2192f0f2340395661fad81c0@34.67.230.193:443", + "enode://ac3948b2c0786ada7d17b80cf869cf59b1909ea3accd45944aae35bf864cc069126da8b82dfef4ddf23f1d6d6b44b1565c4cf81c8b98022253c6aea1a89d3ce2@47.75.88.12:443", + "enode://ce559a37a9c344d7109bd4907802dd690008381d51f658c43056ec36ac043338bd92f1ac6043e645b64953b06f27202d679756a9c7cf62fdefa01b2e6ac5098e@134.209.136.123:443", + "enode://c07aa0deea3b7056c5d45a85bca42f0d8d3b1404eeb9577610f386e0a4744a0e7b2845ae328efc4aa4b28075af838b59b5b3985bffddeec0090b3b7669abc1f3@35.226.92.155:443", + "enode://385579fc5b14e04d5b04af7eee835d426d3d40ccf11f99dbd95340405f37cf3bbbf830b3eb8f70924be0c2909790120682c9c3e791646e2d5413e7801545d353@47.244.221.249:443", + "enode://4e0a8db9b73403c9339a2077e911851750fc955db1fc1e09f81a4a56725946884dd5e4d11258eac961f9078a393c45bcab78dd0e3bc74e37ce773b3471d2e29c@134.209.136.101:443", + "enode://0624b4a90063923c5cc27d12624b6a49a86dfb3623fcb106801217fdbab95f7617b83fa2468b9ae3de593ff6c1cf556ccf9bc705bfae9cb4625999765127b423@35.222.158.246:443", + "enode://b77bffc29e2592f30180311dd81204ab845e5f78953b5ba0587c6631be9c0862963dea5eb64c90617cf0efd75308e22a42e30bc4eb3cd1bbddbd1da38ff6483e@47.75.10.177:443", + "enode://a8bddfa24e1e92a82609b390766faa56cf7a5eef85b22a2b51e79b333c8aaeec84f7b4267e432edd1cf45b63a3ad0fc7d6c3a16f046aa6bc07ebe50e80b63b8c@178.128.141.249:443", + "enode://a5fe9c82ad1ffb16ae60cb5d4ffe746b9de4c5fbf20911992b7dd651b1c08ba17dd2c0b27ee6b03162c52d92f219961cc3eb14286aca8a90b75cf425826c3bd8@104.154.230.58:443", + "enode://cf5f7a7e64e3b306d1bc16073fba45be3344cb6695b0b616ccc2da66ea35b9f35b3b231c6cf335fdfaba523519659a440752fc2e061d1e5bc4ef33864aac2f19@47.75.221.196:443", + "enode://887cbd92d95afc2c5f1e227356314a53d3d18855880ac0509e0c0870362aee03939d4074e6ad31365915af41d34320b5094bfcc12a67c381788cd7298d06c875@178.128.141.0:443", + "enode://282e009967f9f132a5c2dd366a76319f0d22d60d0c51f7e99795a1e40f213c2705a2c10e4cc6f3890319f59da1a535b8835ed9b9c4b57c3aad342bf312fd7379@35.223.240.17:443", + "enode://13d63a1f85ccdcbd2fb6861b9bd9d03f94bdba973608951f7c36e5df5114c91de2b8194d71288f24bfd17908c48468e89dd8f0fb8ccc2b2dedae84acdf65f62a@47.244.210.80:443", + "enode://2b01955d7e11e29dce07343b456e4e96c081760022d1652b1c4b641eaf320e3747871870fa682e9e9cfb85b819ce94ed2fee1ac458904d54fd0b97d33ba2c4a4@134.209.136.112:443", + "enode://b706a60572634760f18a27dd407b2b3582f7e065110dae10e3998498f1ae3f29ba04db198460d83ed6d2bfb254bb06b29aab3c91415d75d3b869cd0037f3853c@35.239.5.162:443", + "enode://32915c8841faaef21a6b75ab6ed7c2b6f0790eb177ad0f4ea6d731bacc19b938624d220d937ebd95e0f6596b7232bbb672905ee12601747a12ee71a15bfdf31c@47.75.59.11:443", + "enode://0d9d65fcd5592df33ed4507ce862b9c748b6dbd1ea3a1deb94e3750052760b4850aa527265bbaf357021d64d5cc53c02b410458e732fafc5b53f257944247760@178.128.141.42:443", + "enode://e87f1d8093d304c3a9d6f1165b85d6b374f1c0cc907d39c0879eb67f0a39d779be7a85cbd52920b6f53a94da43099c58837034afa6a7be4b099bfcd79ad13999@35.238.106.101:443", + ] + + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].whisper[] + "\","' -r + WhisperNodesStaging* = [ + "enode://00395686f5954662a3796e170b9e87bbaf68a050d57e9987b78a2292502dae44aae2b8803280a017ec9af9be0b3121db9d6b3693ab3a0451a866bcbedd58fdac@47.52.226.137:443", + 
"enode://914c0b30f27bab30c1dfd31dad7652a46fda9370542aee1b062498b1345ee0913614b8b9e3e84622e84a7203c5858ae1d9819f63aece13ee668e4f6668063989@167.99.19.148:443", + "enode://2d897c6e846949f9dcf10279f00e9b8325c18fe7fa52d658520ad7be9607c83008b42b06aefd97cfe1fdab571f33a2a9383ff97c5909ed51f63300834913237e@35.192.0.86:443", + ] + + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].whisper[] + "\","' -r + WhisperNodesTest* = [ + "enode://ad38f94030a846cc7005b7a1f3b6b01bf4ef59d34e8d3d6f4d12df23d14ba8656702a435d34cf4df3b412c0c1923df5adcce8461321a0d8ffb9435b26e572c2a@47.52.255.194:443", + "enode://1d193635e015918fb85bbaf774863d12f65d70c6977506187ef04420d74ec06c9e8f0dcb57ea042f85df87433dab17a1260ed8dde1bdf9d6d5d2de4b7bf8e993@206.189.243.163:443", + "enode://f593a27731bc0f8eb088e2d39222c2d59dfb9bf0b3950d7a828d51e8ab9e08fffbd9916a82fd993c1a080c57c2bd70ed6c36f489a969de697aff93088dbee1a9@35.194.31.108:443", + ] + + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.prod"].boot[] + "\","' -r + StatusBootNodes* = [ + "enode://6e6554fb3034b211398fcd0f0082cbb6bd13619e1a7e76ba66e1809aaa0c5f1ac53c9ae79cf2fd4a7bacb10d12010899b370c75fed19b991d9c0cdd02891abad@47.75.99.169:443", + "enode://436cc6f674928fdc9a9f7990f2944002b685d1c37f025c1be425185b5b1f0900feaf1ccc2a6130268f9901be4a7d252f37302c8335a2c1a62736e9232691cc3a@178.128.138.128:443", + "enode://32ff6d88760b0947a3dee54ceff4d8d7f0b4c023c6dad34568615fcae89e26cc2753f28f12485a4116c977be937a72665116596265aa0736b53d46b27446296a@34.70.75.208:443", + "enode://23d0740b11919358625d79d4cac7d50a34d79e9c69e16831c5c70573757a1f5d7d884510bc595d7ee4da3c1508adf87bbc9e9260d804ef03f8c1e37f2fb2fc69@47.52.106.107:443", + "enode://5395aab7833f1ecb671b59bf0521cf20224fe8162fc3d2675de4ee4d5636a75ec32d13268fc184df8d1ddfa803943906882da62a4df42d4fccf6d17808156a87@178.128.140.188:443", + "enode://5405c509df683c962e7c9470b251bb679dd6978f82d5b469f1f6c64d11d50fbd5dd9f7801c6ad51f3b20a5f6c7ffe248cc9ab223f8bcbaeaf14bb1c0ef295fd0@35.223.215.156:443", + ] + + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.staging"].boot[] + "\","' -r + StatusBootNodesStaging* = [ + "enode://630b0342ca4e9552f50714b6c8e28d6955bc0fd14e7950f93bc3b2b8cc8c1f3b6d103df66f51a13d773b5db0f130661fb5c7b8fa21c48890c64c79b41a56a490@47.91.229.44:443", + "enode://f79fb3919f72ca560ad0434dcc387abfe41e0666201ebdada8ede0462454a13deb05cda15f287d2c4bd85da81f0eb25d0a486bbbc8df427b971ac51533bd00fe@174.138.107.239:443", + "enode://10a78c17929a7019ef4aa2249d7302f76ae8a06f40b2dc88b7b31ebff4a623fbb44b4a627acba296c1ced3775d91fbe18463c15097a6a36fdb2c804ff3fc5b35@35.238.97.234:443", + ] + + # curl -s https://fleets.status.im | jq '"\"" + .fleets["eth.test"].boot[] + "\","' -r + StatusBootNodesTest* = [ + "enode://daae2e72820e86e942fa2a8aa7d6e9954d4043a753483d8bd338e16be82cf962392d5c0e1ae57c3d793c3d3dddd8fd58339262e4234dc966f953cd73b535f5fa@47.52.188.149:443", + "enode://9e0988575eb7717c25dea72fd11c7b37767dc09c1a7686f7c2ec577d308d24b377ceb675de4317474a1a870e47882732967f4fa785b02ba95d669b31d464dec0@206.189.243.164:443", + "enode://c1e5018887c863d64e431b69bf617561087825430e4401733f5ba77c70db14236df381fefb0ebe1ac42294b9e261bbe233dbdb83e32c586c66ae26c8de70cb4c@35.188.168.137:443", + ]