deploy: 79ae1370dbb4eba21fab20a71947b439cee92599

decanus 2020-12-21 11:45:07 +00:00
commit 3ead423f92
110 changed files with 11623 additions and 0 deletions

7
.dockerignore Normal file

@@ -0,0 +1,7 @@
/README.md
/Dockerfile
/.*ignore
/LICENSE*
/tests
/metrics
**/vendor/*

30
.gitignore vendored Normal file

@@ -0,0 +1,30 @@
/nimcache
# Executables shall be put in an ignored build/ directory
/build
# Nimble packages
/vendor/.nimble
# Generated Files
*.generated.nim
# ntags/ctags output
/tags
# a symlink that can't be added to the repo because of Windows
/waku.nims
# Ignore dynamic, static libs and libtool archive files
*.so
*.dylib
*.a
*.la
*.exe
*.dll
.DS_Store
# Ignore simulation generated metrics files
/metrics/prometheus
/metrics/waku-sim-all-nodes-grafana-dashboard.json

108
.gitmodules vendored Normal file

@@ -0,0 +1,108 @@
[submodule "vendor/nim-eth"]
path = vendor/nim-eth
url = https://github.com/status-im/nim-eth.git
ignore = dirty
branch = master
[submodule "vendor/nim-secp256k1"]
path = vendor/nim-secp256k1
url = https://github.com/status-im/nim-secp256k1.git
ignore = dirty
branch = master
[submodule "vendor/nim-libp2p"]
path = vendor/nim-libp2p
url = https://github.com/status-im/nim-libp2p.git
ignore = dirty
branch = master
[submodule "vendor/nim-stew"]
path = vendor/nim-stew
url = https://github.com/status-im/nim-stew.git
ignore = dirty
branch = master
[submodule "vendor/nimbus-build-system"]
path = vendor/nimbus-build-system
url = https://github.com/status-im/nimbus-build-system.git
ignore = dirty
branch = master
[submodule "vendor/nim-nat-traversal"]
path = vendor/nim-nat-traversal
url = https://github.com/status-im/nim-nat-traversal.git
ignore = dirty
branch = master
[submodule "vendor/nim-libbacktrace"]
path = vendor/nim-libbacktrace
url = https://github.com/status-im/nim-libbacktrace.git
ignore = dirty
branch = master
[submodule "vendor/nim-confutils"]
path = vendor/nim-confutils
url = https://github.com/status-im/nim-confutils.git
ignore = dirty
branch = master
[submodule "vendor/nim-chronicles"]
path = vendor/nim-chronicles
url = https://github.com/status-im/nim-chronicles.git
ignore = dirty
branch = master
[submodule "vendor/nim-faststreams"]
path = vendor/nim-faststreams
url = https://github.com/status-im/nim-faststreams.git
ignore = dirty
branch = master
[submodule "vendor/nim-chronos"]
path = vendor/nim-chronos
url = https://github.com/status-im/nim-chronos.git
ignore = dirty
branch = master
[submodule "vendor/nim-json-serialization"]
path = vendor/nim-json-serialization
url = https://github.com/status-im/nim-json-serialization.git
ignore = dirty
branch = master
[submodule "vendor/nim-serialization"]
path = vendor/nim-serialization
url = https://github.com/status-im/nim-serialization.git
ignore = dirty
branch = master
[submodule "vendor/nimcrypto"]
path = vendor/nimcrypto
url = https://github.com/cheatfate/nimcrypto.git
ignore = dirty
branch = master
[submodule "vendor/nim-metrics"]
path = vendor/nim-metrics
url = https://github.com/status-im/nim-metrics.git
ignore = dirty
branch = master
[submodule "vendor/nim-stint"]
path = vendor/nim-stint
url = https://github.com/status-im/nim-stint.git
ignore = dirty
branch = master
[submodule "vendor/nim-json-rpc"]
path = vendor/nim-json-rpc
url = https://github.com/status-im/nim-json-rpc.git
ignore = dirty
branch = master
[submodule "vendor/nim-http-utils"]
path = vendor/nim-http-utils
url = https://github.com/status-im/nim-http-utils.git
ignore = dirty
branch = master
[submodule "vendor/news"]
path = vendor/news
url = https://github.com/tormund/news.git
ignore = dirty
branch = master
[submodule "vendor/nim-bearssl"]
path = vendor/nim-bearssl
url = https://github.com/status-im/nim-bearssl.git
ignore = dirty
branch = master
[submodule "vendor/nim-sqlite3-abi"]
path = vendor/nim-sqlite3-abi
url = https://github.com/arnetheduck/nim-sqlite3-abi.git
ignore = dirty
branch = master
[submodule "vendor/nim-web3"]
path = vendor/nim-web3
url = https://github.com/status-im/nim-web3.git

0
.nojekyll Normal file

24
CHANGELOG.md Normal file

@@ -0,0 +1,24 @@
# Changelog
## Next version
- Calls to `publish` a message on `wakunode2` now `await` the dispatched [`WakuRelay`](https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-relay.md) procedures instead of `discard`ing them
- Added JSON-RPC Admin API to retrieve information about peers registered on the `wakunode2`
- `StrictNoSign` enabled.
## 2020-11-30 v0.1
Initial beta release.
This release contains:
- A Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html).
- A Nim implementation of the [Waku v2 protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
- CLI applications `wakunode` and `wakunode2` that allow you to run a Waku v1 or v2 node.
- Examples of Waku v1 and v2 usage.
- Various tests of above.
Currently the Waku v2 implementation, and [most protocols it consists of](https://specs.vac.dev/specs/waku/),
are in a draft/beta state. The Waku v1 implementation is stable but not under active development.
Feedback welcome!

50
Dockerfile Normal file

@@ -0,0 +1,50 @@
# BUILD IMAGE --------------------------------------------------------
FROM alpine:3.12 AS nim-build
ARG NIM_PARAMS
ARG MAKE_TARGET=wakunode
# Get build tools and required header files
RUN apk add --no-cache bash build-base pcre-dev linux-headers git
WORKDIR /app
COPY . .
# Run separately from 'make' to avoid re-doing this step unnecessarily
RUN git submodule update --init --recursive
# Slowest build step for the sake of caching layers
RUN make -j$(nproc) deps
# Build the final node binary
RUN make -j$(nproc) $MAKE_TARGET NIM_PARAMS="$NIM_PARAMS"
# ACTUAL IMAGE -------------------------------------------------------
FROM alpine:3.12
ARG MAKE_TARGET=wakunode2
LABEL maintainer="jakub@status.im"
LABEL source="https://github.com/status-im/nim-waku"
LABEL description="Wakunode: Waku and Whisper client"
# DevP2P, LibP2P, and JSON RPC ports
EXPOSE 30303 60000 8545
# Referenced in the binary
RUN apk add --no-cache libgcc pcre-dev
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
# Copy to separate location to accommodate different MAKE_TARGET values
COPY --from=nim-build /app/build/$MAKE_TARGET /usr/local/bin/
# Symlink the correct wakunode binary
RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode
ENTRYPOINT ["/usr/bin/wakunode"]
# By default just show help if called without arguments
CMD ["--help"]

61
Jenkinsfile vendored Normal file

@@ -0,0 +1,61 @@
pipeline {
agent { label 'linux' }
options {
timestamps()
/* manage how many builds we keep */
buildDiscarder(logRotator(
numToKeepStr: '10',
daysToKeepStr: '30',
))
}
/* WARNING: Two more parameters can be defined.
* See 'environment' section. */
parameters {
string(
name: 'MAKE_TARGET',
description: 'Makefile target to build. Optional Parameter.',
defaultValue: params.MAKE_TARGET ?: 'wakunode2',
)
string(
name: 'IMAGE_TAG',
description: 'Name of Docker tag to push. Optional Parameter.',
defaultValue: params.IMAGE_TAG ?: 'deploy-v2-test',
)
string(
name: 'IMAGE_NAME',
description: 'Name of Docker image to push.',
defaultValue: params.IMAGE_NAME ?: 'statusteam/nim-waku',
)
string(
name: 'NIM_PARAMS',
description: 'Flags for Nim compilation.',
defaultValue: params.NIM_PARAMS ?: '-d:disableMarchNative -d:chronicles_colors:none -d:insecure',
)
}
stages {
stage('Build') {
steps { script {
image = docker.build(
"${params.IMAGE_NAME}:${env.GIT_COMMIT.take(6)}",
"--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
"--build-arg=NIM_PARAMS='${params.NIM_PARAMS}' ."
)
} }
}
stage('Push') {
steps { script {
withDockerRegistry([credentialsId: "dockerhub-statusteam-auto", url: ""]) {
image.push()
image.push(env.IMAGE_TAG)
}
} }
}
} // stages
post {
always { sh 'docker image prune -f' }
} // post
} // pipeline

205
LICENSE-APACHEv2 Normal file

@@ -0,0 +1,205 @@
beacon_chain is licensed under the Apache License version 2
Copyright (c) 2018 Status Research & Development GmbH
-----------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Status Research & Development GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
LICENSE-MIT Normal file

@@ -0,0 +1,25 @@
beacon_chain is licensed under the MIT License
Copyright (c) 2018 Status Research & Development GmbH
-----------------------------------------------------
The MIT License (MIT)
Copyright (c) 2018 Status Research & Development GmbH
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

151
Makefile Normal file

@@ -0,0 +1,151 @@
# Copyright (c) 2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
SHELL := bash # the shell used internally by Make
# used inside the included makefiles
BUILD_SYSTEM_DIR := vendor/nimbus-build-system
# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics
# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker
DOCKER_IMAGE_NIM_PARAMS ?= -d:chronicles_colors:none -d:insecure
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
.PHONY: \
all \
deps \
update \
sim1 \
wakunode1 \
wakunode2 \
example1 \
example2 \
bridge \
test \
clean \
libbacktrace
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
GIT_SUBMODULE_UPDATE := git submodule update --init --recursive
.DEFAULT:
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
$(GIT_SUBMODULE_UPDATE); \
echo
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
#
# After restarting, it will execute its original goal, so we don't have to start a child Make here
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
else # "variables.mk" was included. Business as usual until the end of this file.
# default target, because it's the first one that doesn't start with '.'
all: | wakunode1 sim1 example1 wakunode2 sim2 example2 chat2 bridge
# must be included after the default target
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims
ifeq ($(USE_LIBBACKTRACE), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:debug -d:disable_libbacktrace
else
NIM_PARAMS := $(NIM_PARAMS) -d:release
endif
deps: | deps-common nat-libs waku.nims
ifneq ($(USE_LIBBACKTRACE), 0)
deps: | libbacktrace
endif
#- deletes and recreates "waku.nims" which on Windows is a copy instead of a proper symlink
update: | update-common
rm -rf waku.nims && \
$(MAKE) waku.nims $(HANDLE_OUTPUT)
# a phony target, because teaching `make` how to do conditional recompilation of Nim projects is too complicated
# Waku v1 targets
wakunode1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakunode1 $(NIM_PARAMS) waku.nims
sim1: | build deps wakunode1
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim sim1 $(NIM_PARAMS) waku.nims
example1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim example1 $(NIM_PARAMS) waku.nims
test1: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test1 $(NIM_PARAMS) waku.nims
# Waku v2 targets
wakunode2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims
sim2: | build deps wakunode2
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim sim2 $(NIM_PARAMS) waku.nims
example2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim example2 $(NIM_PARAMS) waku.nims
test2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test2 $(NIM_PARAMS) waku.nims
scripts2: | build deps wakunode2
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim scripts2 $(NIM_PARAMS) waku.nims
chat2: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim chat2 $(NIM_PARAMS) waku.nims
bridge: | build deps
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim bridge $(NIM_PARAMS) waku.nims
# Builds and run the test suite (Waku v1 + v2)
test: | test1 test2
# symlink
waku.nims:
ln -s waku.nimble $@
# nim-libbacktrace
libbacktrace:
+ $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
# build a docker image for the fleet
docker-image: MAKE_TARGET ?= wakunode1
docker-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)
docker-image: DOCKER_IMAGE_NAME ?= statusteam/nim-waku:$(DOCKER_IMAGE_TAG)
docker-image:
docker build \
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
--build-arg="NIM_PARAMS=$(DOCKER_IMAGE_NIM_PARAMS)" \
--tag $(DOCKER_IMAGE_NAME) .
docker-push:
docker push $(DOCKER_IMAGE_NAME)
# usual cleaning
clean: | clean-common
rm -rf build
ifneq ($(USE_LIBBACKTRACE), 0)
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
endif
endif # "variables.mk" was not included

56
README.md Normal file

@@ -0,0 +1,56 @@
# nim-waku
## Introduction
The nim-waku repository implements Waku v1 and v2, and provides related tools:
- A Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html).
- A Nim implementation of the [Waku v2 protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
- CLI applications `wakunode` and `wakunode2` that allow you to run a Waku v1 or v2 node.
- Examples of Waku v1 and v2 usage.
- Various tests of above.
For more details on Waku v1 and v2, see their respective home folders:
- [Waku v1](waku/v1/README.md)
- [Waku v2](waku/v2/README.md)
## How to Build & Run
These instructions are generic and apply to both Waku v1 and v2. For more
detailed instructions, see the Waku v1 and v2 home folders linked above.
### Prerequisites
* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer.
* PCRE
More information on the installation of these can be found [here](https://github.com/status-im/nimbus#prerequisites).
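On Debian or Ubuntu, for example, installing the prerequisites typically looks something like the sketch below (the package names are an assumption and may differ per distribution):
```bash
# Hypothetical package selection; adjust for your distribution
sudo apt-get install build-essential git libpcre3-dev
```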
### Wakunode
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make wakunode1 wakunode2
# See available command line options
./build/wakunode --help
./build/wakunode2 --help
# Connect the client directly with the Status test fleet
./build/wakunode --log-level:debug --discovery:off --fleet:test --log-metrics
# TODO Equivalent for v2
```
### Waku Protocol Test Suite
```bash
# Run all the Waku v1 and v2 tests
make test
```
### Examples
Examples can be found in the examples folder. For Waku v2, there is a fully
featured chat example.
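As a minimal sketch, the chat example can be built and started via the `chat2` Makefile target (see `docs/tutorial/dingpu.md` for store/static node addresses to connect to):
```bash
# Build the Waku v2 chat example
make chat2

# Run it; optionally pass --storenode/--staticnode to reach a known peer
./build/chat2 --ports-shift:0
```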

64
config.nims Normal file

@@ -0,0 +1,64 @@
if defined(release):
switch("nimcache", "nimcache/release/$projectName")
else:
switch("nimcache", "nimcache/debug/$projectName")
if defined(windows):
# disable timestamps in Windows PE headers - https://wiki.debian.org/ReproducibleBuilds/TimestampsInPEBinaries
switch("passL", "-Wl,--no-insert-timestamp")
# increase stack size
switch("passL", "-Wl,--stack,8388608")
# https://github.com/nim-lang/Nim/issues/4057
--tlsEmulation:off
if defined(i386):
# set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
switch("passL", "-Wl,--large-address-aware")
# The dynamic Chronicles output currently prevents us from using colors on Windows
# because these require direct manipulations of the stdout File object.
switch("define", "chronicles_colors=off")
# This helps especially for 32-bit x86, which sans SSE2 and newer instructions
# requires quite roundabout code generation for cryptography, and other 64-bit
# and larger arithmetic use cases, along with register starvation issues. When
# engineering a more portable binary release, this should be tweaked but still
# use at least -msse2 or -msse3.
if defined(disableMarchNative):
switch("passC", "-msse3")
else:
switch("passC", "-march=native")
if defined(windows):
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782
# ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes)
switch("passC", "-mno-avx512vl")
--threads:on
--opt:speed
--excessiveStackTrace:on
# enable metric collection
--define:metrics
# for heap-usage-by-instance-type metrics and object base-type strings
--define:nimTypeNames
# the default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx):
# add debugging symbols and original files and line numbers
--debugger:native
if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace):
# light-weight stack traces using libbacktrace and libunwind
--define:nimStackTraceOverride
switch("import", "libbacktrace")
--define:nimOldCaseObjects # https://github.com/status-im/nim-confutils/issues/9
# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'"
switch("warning", "CaseTransition:off")
# The compiler doth protest too much, methinks, about all these cases where it can't
# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230
switch("warning", "ObservableStores:off")
# Too many false positives for "Warning: method has lock level <unknown>, but another method has 0 [LockLevel]"
switch("warning", "LockLevel:off")

81
docs/api/v2/node.md Normal file

@@ -0,0 +1,81 @@
# Waku APIs
## Nim API
The Nim Waku API consists of a set of methods operating on the Waku Node object.
Some of them have different arity depending on what privacy/bandwidth trade-off
the consumer wants to make. These methods are:
1. **Init** - create a node.
2. **Start** - start a created node.
3. **Subscribe** - to a topic or a specific content filter.
4. **Unsubscribe** - from a topic or a specific content filter.
5. **Publish** - to a topic, or a topic and a specific content filter.
6. **Query** - for historical messages.
7. **Info** - to get information about the node.
```Nim
proc init*(T: type WakuNode, nodeKey: crypto.PrivateKey,
bindIp: ValidIpAddress, bindPort: Port,
extIp = none[ValidIpAddress](), extPort = none[Port]()): T =
## Creates a Waku Node.
##
## Status: Implemented.
proc start*(node: WakuNode) {.async.} =
## Starts a created Waku Node.
##
## Status: Implemented.
proc subscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) {.async.} =
## Subscribes to a PubSub topic. Triggers handler when receiving messages on
## this topic. TopicHandler is a method that takes a topic and some data.
##
## NOTE The data field SHOULD be decoded as a WakuMessage.
## Status: Implemented.
proc subscribe*(node: WakuNode, request: FilterRequest, handler: ContentFilterHandler) {.async, gcsafe.} =
## Registers for messages that match a specific filter. Triggers the handler whenever a message is received.
## FilterHandler is a method that takes a MessagePush.
##
## Status: Implemented.
proc unsubscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) {.async.} =
## Unsubscribes a handler from a PubSub topic.
##
## Status: Implemented.
proc unsubscribeAll*(node: WakuNode, topic: Topic) {.async.} =
## Unsubscribes all handlers registered on a specific PubSub topic.
##
## Status: Implemented.
proc unsubscribe*(w: WakuNode, contentFilter: ContentFilter) =
## Unsubscribe from a content filter.
##
## Status: Not yet implemented.
## TODO Implement.
proc publish*(node: WakuNode, topic: Topic, message: WakuMessage) =
## Publish a `WakuMessage` to a PubSub topic. `WakuMessage` should contain a
## `contentTopic` field for light node functionality. This field may also
## be omitted.
##
## Status: Implemented.
proc query*(w: WakuNode, query: HistoryQuery, handler: QueryHandlerFunc) {.async, gcsafe.} =
## Queries known nodes for historical messages. Triggers the handler whenever a response is received.
## QueryHandlerFunc is a method that takes a HistoryResponse.
##
## Status: Implemented.
proc info*(node: WakuNode): WakuInfo =
## Returns information about the Node, such as what multiaddress it can be reached at.
##
## Status: Implemented.
##
```
## JSON RPC
TODO To specify


@@ -0,0 +1,9 @@
# Contributors
This folder contains documentation that is primarily useful for contributors. Some links and
resources here might require privileged access.
Example resources:
- How to do releases
- Viewing and modifying metrics dashboard


@@ -0,0 +1,32 @@
# Description
This document describes the continuous integration setup for `nim-waku`.
# Details
The CI setup exists on the Status.im Jenkins instance:
https://ci.status.im/job/nim-waku/
It currently consists of four jobs:
* [manual](https://ci.status.im/job/nim-waku/job/manual/) - For manually executing builds using parameters.
* [deploy-v1-test](https://ci.status.im/job/nim-waku/job/deploy-v1-test/) - Builds every new commit in `master` and deploys to `wakuv1.test` fleet.
* [deploy-v2-test](https://ci.status.im/job/nim-waku/job/deploy-v2-test/) - Builds every new commit in `master` and deploys to `wakuv2.test` fleet.
* [deploy-v2-prod](https://ci.status.im/job/nim-waku/job/deploy-v2-prod/) - Currently has no automatic trigger, and deploys to `wakuv2.prod` fleet.
# Configuration
The main configuration file is [`Jenkinsfile`](../../Jenkinsfile) at the root of this repo.
The key part is the definition of four `parameters`:
* `MAKE_TARGET` - Which `Makefile` target is built.
* `IMAGE_TAG` - Tag of the Docker image to push.
* `IMAGE_NAME` - Name of the Docker image to push.
* `NIM_PARAMS` - Nim compilation parameters.
The use of the `?:` [Elvis operator](http://groovy-lang.org/operators.html#_elvis_operator) plays a key role: it allows parameters to be changed for each defined job in Jenkins without being overridden by the `Jenkinsfile` defaults after every job run.
```groovy
defaultValue: params.IMAGE_TAG ?: 'deploy-v2-test',
```


@@ -0,0 +1,41 @@
# Release Process
How to do releases.
For more context, see https://trunkbaseddevelopment.com/branch-for-release/
## How to do releases
1. Checkout a release branch from master
`git checkout -b release/v0.1`
2. Update `CHANGELOG.md` and ensure it is up to date
3. Create a tag with the same name as the release and push it
```
git tag -as v0.1 -m "Initial release."
git push origin v0.1
```
4. Open a PR
5. Harden the release in the release branch
6. Modify tag
If you need to update the release, remove the tag and make sure the new tag is
associated with the CHANGELOG update.
```
# Delete tag
git tag -d v0.1
git push --delete origin v0.1
# Make changes, rebase and tag again
# Squash to one commit and make a nice commit message
git rebase -i origin/master
git tag -as v0.1 -m "Initial release."
git push origin v0.1
```

19
docs/faq.md Normal file

@@ -0,0 +1,19 @@
# FAQ
## Where do I find cluster node logs? (internal)
At [Kibana](https://kibana.status.im/app/kibana#/discover?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:'2020-09-09T20:21:49.910Z',to:now))&_a=(columns:!(message,severity_name),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:d6db7610-60fd-11e9-98fa-2f101d13f938,key:program.keyword,negate:!f,params:(query:docker%2Fnim-waku-node),type:phrase),query:(match_phrase:(program.keyword:docker%2Fnim-waku-node))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:d6db7610-60fd-11e9-98fa-2f101d13f938,key:fleet.keyword,negate:!f,params:(query:wakuv2.test),type:phrase),query:(match_phrase:(fleet.keyword:wakuv2.test)))),index:d6db7610-60fd-11e9-98fa-2f101d13f938,interval:auto,query:(language:kuery,query:Listening),sort:!()))
Login with Github. For access issues, contact devops.
Modify search field and time window as appropriate.
## How do I see what address a node is listening on?
Grep for "Listening on". It should be printed at INFO level at the beginning. E.g. from Kibana:
`Oct 7, 2020 @ 23:17:00.383INF 2020-10-07 23:17:00.375+00:00 Listening on topics="wakunode" tid=1 file=wakunode2.nim:140 full=/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS`
## How do I update all submodules at once?
`git submodule foreach --recursive git submodule update --init`

42
docs/tutorial/dingpu.md Normal file

@@ -0,0 +1,42 @@
# Dingpu testnet
## Basic chat usage
> If historical messaging is desired, the chat app requires that the remote peer specified in the `storenode` option supports the WakuStore protocol. For the current cluster node deployed as part of Dingpu this is already the case.
Start two chat apps:
```
./build/chat2 --ports-shift:0 --storenode:/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS --staticnode:/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS
./build/chat2 --ports-shift:1 --storenode:/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS --staticnode:/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS
```
By specifying `staticnode`, the chat app connects to that node and subscribes to the `waku` topic. This ensures messages are relayed properly.
Then type messages to publish.
## Interactively add a node
There is also an interactive mode. Type `/connect`, then paste the address of the other node. However, this currently has some timing issues with the mesh not being updated, so it is advised not to use it until the issue has been addressed. See https://github.com/status-im/nim-waku/issues/231
## Dingpu cluster node
```
/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS
```
## Run a node
To just run a node without interacting in the chat, it is enough to run `wakunode2`:
```
./build/wakunode2 --staticnode:<multiaddr>
```
You can also run the `wakubridge` process, which runs both a Waku v1 and a Waku v2
node. Currently, it has the same effect as running a `wakunode` and a `wakunode2`
process separately, but bridging functionality will be added to this application
later.
```
./build/wakubridge --staticnodev2:<multiaddr> --fleetv1:test
```

33
docs/tutorial/filter.md Normal file

@@ -0,0 +1,33 @@
# Running Filter Protocol
## How to
Build:
```
# make wakunode2 is run as part of scripts2 target
make scripts2
```
Run two nodes and connect them:
```
# Starts listening on 60000 with RPC server on 8545.
# Note the "listening on address" in logs.
./build/wakunode2 --ports-shift:0
# Run another node with staticnode argument
./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp --filternode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp
```
You should see your nodes connecting.
Do basic RPC calls:
```
./build/rpc_subscribe 8545
./build/rpc_subscribe_filter 8546 # enter your topic; the default is "foobar"
./build/rpc_publish 8545 # enter your message in STDIN
```
You should see the other node receive something.

42
docs/tutorial/nangang.md Normal file

@@ -0,0 +1,42 @@
# Nangang Test
Nangang is the first internal testnet. See
https://github.com/vacp2p/research/issues/43 for more.
## How to
Build:
```
# make wakunode2 is run as part of scripts2 target
make scripts2
```
Run two nodes and connect them:
```
# Starts listening on 60000 with RPC server on 8545.
# Note the "listening on address" in logs.
./build/wakunode2 --ports-shift:0
# Run another node with staticnode argument
./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp
```
You should see your nodes connecting.
Do basic RPC calls:
```
./build/rpc_subscribe 8545
./build/rpc_subscribe 8546
./build/rpc_publish 8545 # enter your message in STDIN
```
You should see the other node receive something.
## Nangang cluster node
```
/ip4/134.209.139.210/tcp/60000/p2p/16Uiu2HAmJb2e28qLXxT5kZxVUUoJt72EMzNGXB47Rxx5hw3q4YjS
```

37
docs/tutorial/store.md Normal file

@@ -0,0 +1,37 @@
# Running Store Protocol
## How to
Build:
```
# make wakunode2 is run as part of scripts2 target
make scripts2
```
Run two nodes and connect them:
```
# Starts listening on 60000 with RPC server on 8545.
# Note the "listening on address" in logs.
./build/wakunode2 --ports-shift:0
# Run another node with staticnode argument
./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp --storenode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp
```
When the `dbpath` flag is passed with a path, messages are persisted and stored in a database called `store` under the specified path.
When none is passed, messages are not persisted and are only stored in memory.
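As a sketch, persistence could be enabled on the first (store) node like this, assuming `dbpath` follows the same `--flag:value` syntax as the other options:
```
# Hypothetical invocation: persist store messages under /tmp/waku-store
./build/wakunode2 --ports-shift:0 --dbpath:/tmp/waku-store
```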
You should see your nodes connecting.
Do basic RPC calls:
```
./build/rpc_subscribe 8545
./build/rpc_subscribe 8546
./build/rpc_publish 8545 # enter your message in STDIN
./build/rpc_query 8546 # enter your topic; the default is "foobar"
```
You should see the other node receive something.

8
env.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
ABS_PATH="$(cd ${REL_PATH}; pwd)"
source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh

42
examples/v1/README.md Normal file

@@ -0,0 +1,42 @@
# Waku v1 example
## Introduction
This is a basic Waku v1 example to show the Waku v1 API usage.
It can be run as a single node, in which case it will just post and receive its
own messages. Alternatively, multiple nodes can be started and connected to each
other, so that messages are passed around.
## How to build
```sh
make example1
```
## How to run
### Single node
```sh
# Launch example node
./build/example
```
Messages will be posted and received.
### Multiple nodes
```sh
# Launch first example node
./build/example
```
Now look for an `INFO` log containing the enode address, e.g.:
`enode://26..5b@0.0.0.0:30303` (but with the full address)
Copy the full enode string of the first node and start the second
node with that enode string as the staticnode config option:
```sh
# Launch second example node, providing the enode address of the first node
./build/example --staticnode:enode://26..5b@0.0.0.0:30303 --ports-shift:1
```
Now both nodes will also receive messages from each other.


@@ -0,0 +1,65 @@
import
confutils/defs, chronicles, chronos, eth/keys
type
WakuNodeCmd* = enum
noCommand
WakuNodeConf* = object
logLevel* {.
desc: "Sets the log level."
defaultValue: LogLevel.INFO
name: "log-level" .}: LogLevel
case cmd* {.
command
defaultValue: noCommand .}: WakuNodeCmd
of noCommand:
tcpPort* {.
desc: "TCP listening port."
defaultValue: 30303
name: "tcp-port" .}: uint16
udpPort* {.
desc: "UDP listening port."
defaultValue: 30303
name: "udp-port" .}: uint16
portsShift* {.
desc: "Add a shift to all port numbers."
defaultValue: 0
name: "ports-shift" .}: uint16
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>."
defaultValue: "any" .}: string
staticnodes* {.
desc: "Enode URL to directly connect with. Argument may be repeated."
name: "staticnode" .}: seq[string]
nodekey* {.
desc: "P2P node private key as hex.",
defaultValue: KeyPair.random(keys.newRng()[])
name: "nodekey" .}: KeyPair
proc parseCmdArg*(T: type KeyPair, p: TaintedString): T =
try:
let privkey = PrivateKey.fromHex(string(p)).tryGet()
result = privkey.toKeyPair()
except CatchableError as e:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type KeyPair, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type IpAddress, p: TaintedString): T =
try:
result = parseIpAddress(p)
except CatchableError as e:
raise newException(ConfigurationError, "Invalid IP address")
proc completeCmdArg*(T: type IpAddress, val: TaintedString): seq[string] =
return @[]

108
examples/v1/example.nim Normal file

@@ -0,0 +1,108 @@
import
confutils, chronicles, chronos, stew/byteutils, stew/shims/net as stewNet,
eth/[keys, p2p],
../../waku/v1/protocol/waku_protocol,
../../waku/v1/node/waku_helpers,
../../waku/common/utils/nat,
./config_example
## This is a simple Waku v1 example to show the Waku v1 API usage.
const clientId = "Waku example v1"
let
# Load the cli configuration from `config_example.nim`.
config = WakuNodeConf.load()
# Seed the rng.
rng = keys.newRng()
# Set up the address according to NAT information.
(ipExt, tcpPortExt, udpPortExt) = setupNat(config.nat, clientId,
Port(config.tcpPort + config.portsShift),
Port(config.udpPort + config.portsShift))
# TODO: EthereumNode should have a better split of binding address and
# external address. Also, can't have different ports as it stands now.
address = if ipExt.isNone():
Address(ip: parseIpAddress("0.0.0.0"),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
else:
Address(ip: ipExt.get(),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
# Create Ethereum Node
var node = newEthereumNode(config.nodekey, # Node identifier
address, # Address reachable for incoming requests
1, # Network Id, only applicable for ETH protocol
nil, # Database, not required for Waku
clientId, # Client id string
addAllCapabilities = false, # Disable default all RLPx capabilities
rng = rng)
node.addCapability Waku # Enable only the Waku protocol.
# Set up the Waku configuration.
let wakuConfig = WakuConfig(powRequirement: 0.002,
bloom: some(fullBloom()), # Full bloom filter
isLightNode: false, # Full node
maxMsgSize: waku_protocol.defaultMaxMsgSize,
topics: none(seq[waku_protocol.Topic]) # empty topic interest
)
node.configureWaku(wakuConfig)
# Optionally direct connect to a set of nodes.
if config.staticnodes.len > 0:
connectToNodes(node, config.staticnodes)
# Connect to the network, which will make the node start listening and/or
# connect to bootnodes, and/or start discovery.
# This will block until first connection is made, which in this case can only
# happen if we directly connect to nodes (step above) or if an incoming
# connection occurs, which is why we use a callback to exit on errors instead of
# using `await`.
# TODO: This looks a bit awkward and the API should perhaps be altered here.
let connectedFut = node.connectToNetwork(@[],
true, # Enable listening
false # Disable discovery (only discovery v4 is currently supported)
)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:
if connectedFut.failed:
fatal "connectToNetwork failed", msg = connectedFut.readError.msg
quit(1)
# Using a hardcoded symmetric key for encryption of the payload for the sake of
# simplicity.
var symKey: SymKey
symKey[31] = 1
# Asymmetric keypair to sign the payload.
let signKeyPair = KeyPair.random(rng[])
# Code to be executed on receipt of a message matching the filter.
proc handler(msg: ReceivedMessage) =
if msg.decoded.src.isSome():
echo "Received message from ", $msg.decoded.src.get(), ": ",
string.fromBytes(msg.decoded.payload)
# Create and subscribe filter with above handler.
let
topic = [byte 0, 0, 0, 0]
filter = initFilter(symKey = some(symKey), topics = @[topic])
discard node.subscribeFilter(filter, handler)
# Repeat the posting of a message every 5 seconds.
proc repeatMessage(udata: pointer) {.gcsafe.} =
{.gcsafe.}:
# Post a waku message on the network, encrypted with provided symmetric key,
# signed with asymmetric key, on topic and with ttl of 30 seconds.
let posted = node.postMessage(
symKey = some(symKey), src = some(signKeyPair.seckey),
ttl = 30, topic = topic, payload = @[byte 0x48, 0x65, 0x6C, 0x6C, 0x6F])
if posted: echo "Posted message as ", $signKeyPair.pubkey
else: echo "Posting message failed."
discard setTimer(Moment.fromNow(5.seconds), repeatMessage)
discard setTimer(Moment.fromNow(5.seconds), repeatMessage)
runForever()

48
examples/v2/basic2.nim Normal file

@@ -0,0 +1,48 @@
## Here's a basic example of how you would start a Waku node, subscribe to
## topics, and publish to them.
import
std/[os,options],
confutils, chronicles, chronos,
stew/shims/net as stewNet,
libp2p/crypto/[crypto,secp],
eth/keys,
json_rpc/[rpcclient, rpcserver],
../../waku/v2/node/[config, wakunode2],
../../waku/common/utils/nat,
../../waku/v2/waku_types
type
Topic* = waku_types.Topic
# Node operations happen asynchronously
proc runBackground() {.async.} =
let
conf = WakuNodeConf.load()
(extIp, extTcpPort, extUdpPort) = setupNat(conf.nat, clientId,
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(conf.udpPort) + conf.portsShift))
node = WakuNode.init(conf.nodeKey, conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift), extIp, extTcpPort)
await node.start()
await node.mountRelay()
# Subscribe to a topic
let topic = cast[Topic]("foobar")
proc handler(topic: Topic, data: seq[byte]) {.async, gcsafe.} =
let message = WakuMessage.init(data).value
let payload = cast[string](message.payload)
info "Hit subscribe handler", topic=topic, payload=payload, contentTopic=message.contentTopic
await node.subscribe(topic, handler)
# Publish to a topic
let payload = cast[seq[byte]]("hello world")
let message = WakuMessage(payload: payload, contentTopic: ContentTopic(1))
await node.publish(topic, message)
# TODO Await with try/except here
discard runBackground()
runForever()

298
examples/v2/chat2.nim Normal file

@@ -0,0 +1,298 @@
when not(compileOption("threads")):
{.fatal: "Please, compile this program with the --threads:on option!".}
import std/[tables, strformat, strutils]
import confutils, chronicles, chronos, stew/shims/net as stewNet,
eth/keys, bearssl, stew/[byteutils, endians2],
nimcrypto/pbkdf2
import libp2p/[switch, # manage transports, a single entry point for dialing and listening
crypto/crypto, # cryptographic functions
protocols/identify, # identify the peer info of a peer
stream/connection, # create and close stream read / write connections
transports/tcptransport, # listen and dial to other peers using client-server protocol
multiaddress, # encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
peerinfo, # manage the information of a peer, such as peer ID and public / private key
peerid, # Implement how peers interact
protocols/protocol, # define the protocol base type
protocols/secure/secure, # define the protocol of secure connection
protocols/secure/secio, # define the protocol of secure input / output, allows encrypted communication that uses public keys to validate signed messages instead of a certificate authority like in TLS
muxers/muxer, # define an interface for stream multiplexing, allowing peers to offer many protocols over a single connection
muxers/mplex/mplex] # define some constants and message types for stream multiplexing
import ../../waku/v2/node/[config, wakunode2, waku_payload],
../../waku/v2/protocol/[waku_relay, waku_filter],
../../waku/v2/protocol/waku_store/waku_store,
../../waku/common/utils/nat,
../../waku/v2/waku_types
const Help = """
Commands: /[?|help|connect|disconnect|exit]
help: Prints this help
connect: dials a remote peer
disconnect: ends current session
exit: closes the chat
"""
const
PayloadV1* {.booldefine.} = false
DefaultTopic = "/waku/2/default-waku/proto"
Dingpu = "dingpu".toBytes
DefaultContentTopic = ContentTopic(uint32.fromBytes(Dingpu))
# XXX Connected is a bit annoying, because incoming connections don't trigger state change
# Could poll connection pool or something here, I suppose
# TODO Ensure connected turns true on incoming connections, or get rid of it
type Chat = ref object
node: WakuNode # waku node for publishing, subscribing, etc
transp: StreamTransport # transport streams between read & write file descriptor
subscribed: bool # indicates if a node is subscribed or not to a topic
connected: bool # if the node is connected to another peer
started: bool # if the node has started
type
PrivateKey* = crypto.PrivateKey
Topic* = waku_types.Topic
# Similarly as Status public chats now.
proc generateSymKey(contentTopic: ContentTopic): SymKey =
var ctx: HMAC[sha256]
var symKey: SymKey
if pbkdf2(ctx, contentTopic.toBytes(), "", 65356, symKey) != sizeof(SymKey):
raise (ref Defect)(msg: "Should not occur as array is properly sized")
symKey
let DefaultSymKey = generateSymKey(DefaultContentTopic)
proc initAddress(T: type MultiAddress, str: string): T =
let address = MultiAddress.init(str).tryGet()
if IPFS.match(address) and matchPartial(multiaddress.TCP, address):
result = address
else:
raise newException(ValueError,
"Invalid bootstrap node multi-address")
proc parsePeer(address: string): PeerInfo =
let multiAddr = MultiAddress.initAddress(address)
let parts = address.split("/")
result = PeerInfo.init(parts[^1], [multiAddr])
proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} =
echo "Connecting to nodes"
await c.node.connectToNodes(nodes)
c.connected = true
proc publish(c: Chat, line: string) =
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
payload = Payload(payload: line.toBytes(), symKey: some(DefaultSymKey))
version = 1'u32
encodedPayload = payload.encode(version, c.node.rng[])
if encodedPayload.isOk():
let message = WakuMessage(payload: encodedPayload.get(),
contentTopic: DefaultContentTopic, version: version)
asyncSpawn c.node.publish(DefaultTopic, message)
else:
warn "Payload encoding failed", error = encodedPayload.error
else:
# No payload encoding/encryption from Waku
let message = WakuMessage(payload: line.toBytes(),
contentTopic: DefaultContentTopic, version: 0)
asyncSpawn c.node.publish(DefaultTopic, message)
# TODO This should read or be subscribe handler subscribe
proc readAndPrint(c: Chat) {.async.} =
while true:
# while p.connected:
# # TODO: echo &"{p.id} -> "
#
# echo cast[string](await p.conn.readLp(1024))
#echo "readAndPrint subscribe NYI"
await sleepAsync(100.millis)
# TODO Implement
proc writeAndPrint(c: Chat) {.async.} =
while true:
# Connect state not updated on incoming WakuRelay connections
# if not c.connected:
# echo "type an address or wait for a connection:"
# echo "type /[help|?] for help"
let line = await c.transp.readLine()
if line.startsWith("/help") or line.startsWith("/?") or not c.started:
echo Help
continue
# if line.startsWith("/disconnect"):
# echo "Ending current session"
# if p.connected and p.conn.closed.not:
# await p.conn.close()
# p.connected = false
elif line.startsWith("/connect"):
# TODO Should be able to connect to multiple peers for Waku chat
if c.connected:
echo "already connected to at least one peer"
continue
echo "enter address of remote peer"
let address = await c.transp.readLine()
if address.len > 0:
await c.connectToNodes(@[address])
# elif line.startsWith("/exit"):
# if p.connected and p.conn.closed.not:
# await p.conn.close()
# p.connected = false
#
# await p.switch.stop()
# echo "quitting..."
# quit(0)
else:
# XXX connected state problematic
if c.started:
c.publish(line)
# TODO Connect to peer logic?
else:
try:
if line.startsWith("/") and "p2p" in line:
await c.connectToNodes(@[line])
except:
echo &"unable to dial remote peer {line}"
echo getCurrentExceptionMsg()
proc readWriteLoop(c: Chat) {.async.} =
asyncCheck c.writeAndPrint() # execute the async function but does not block
asyncCheck c.readAndPrint()
proc readInput(wfd: AsyncFD) {.thread.} =
## This procedure performs reading from `stdin` and sends data over
## pipe to main thread.
let transp = fromPipe(wfd)
while true:
let line = stdin.readLine()
discard waitFor transp.write(line & "\r\n")
proc processInput(rfd: AsyncFD, rng: ref BrHmacDrbgContext) {.async.} =
let transp = fromPipe(rfd)
let
conf = WakuNodeConf.load()
(extIp, extTcpPort, extUdpPort) = setupNat(conf.nat, clientId,
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(conf.udpPort) + conf.portsShift))
node = WakuNode.init(conf.nodeKey, conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift), extIp, extTcpPort)
await node.start()
if conf.filternode != "":
await node.mountRelay(conf.topics.split(" "))
else:
await node.mountRelay(@[])
var chat = Chat(node: node, transp: transp, subscribed: true, connected: false, started: true)
if conf.staticnodes.len > 0:
await connectToNodes(chat, conf.staticnodes)
let peerInfo = node.peerInfo
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
echo &"Listening on\n {listenStr}"
if conf.swap:
node.mountSwap()
if conf.storenode != "":
node.mountStore()
node.wakuStore.setPeer(parsePeer(conf.storenode))
proc storeHandler(response: HistoryResponse) {.gcsafe.} =
for msg in response.messages:
let payload = string.fromBytes(msg.payload)
echo &"{payload}"
info "Hit store handler"
await node.query(HistoryQuery(topics: @[DefaultContentTopic]), storeHandler)
if conf.filternode != "":
node.mountFilter()
node.wakuFilter.setPeer(parsePeer(conf.filternode))
proc filterHandler(msg: WakuMessage) {.gcsafe.} =
let payload = string.fromBytes(msg.payload)
echo &"{payload}"
info "Hit filter handler"
await node.subscribe(
FilterRequest(contentFilters: @[ContentFilter(topics: @[DefaultContentTopic])], topic: DefaultTopic, subscribe: true),
filterHandler
)
# Subscribe to a topic
# TODO To get end to end sender would require more information in payload
# We could possibly indicate the relayer point with connection somehow probably (?)
proc handler(topic: Topic, data: seq[byte]) {.async, gcsafe.} =
let decoded = WakuMessage.init(data)
if decoded.isOk():
let msg = decoded.get()
when PayloadV1:
# Use Waku v1 payload encoding/encryption
let
keyInfo = KeyInfo(kind: Symmetric, symKey: DefaultSymKey)
decodedPayload = decodePayload(decoded.get(), keyInfo)
if decodedPayload.isOK():
let payload = string.fromBytes(decodedPayload.get().payload)
echo &"{payload}"
info "Hit subscribe handler", topic, payload,
contentTopic = msg.contentTopic
else:
debug "Invalid encoded WakuMessage payload",
error = decodedPayload.error
else:
# No payload encoding/encryption from Waku
let payload = string.fromBytes(msg.payload)
echo &"{payload}"
info "Hit subscribe handler", topic, payload,
contentTopic = msg.contentTopic
else:
trace "Invalid encoded WakuMessage", error = decoded.error
let topic = cast[Topic](DefaultTopic)
await node.subscribe(topic, handler)
await chat.readWriteLoop()
runForever()
#await allFuturesThrowing(libp2pFuts)
proc main() {.async.} =
let rng = crypto.newRng() # Single random number source for the whole application
let (rfd, wfd) = createAsyncPipe()
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
raise newException(ValueError, "Could not initialize pipe!")
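# Reading stdin is blocking, so readInput runs it in a dedicated thread; each
# line is forwarded over the async pipe into the chronos loop (processInput).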
var thread: Thread[AsyncFD]
thread.createThread(readInput, wfd)
await processInput(rfd, rng)
when isMainModule: # isMainModule = true when the module is compiled as the main file
waitFor(main())
## Dump of things that can be improved:
##
## - Incoming dialed peer does not change connected state (not relying on it for now)
## - Unclear if staticnode argument works (can enter manually)
## - Don't trigger self / double publish own messages
## - Integrate store protocol (fetch messages in beginning)
## - Integrate filter protocol (default/option to be light node, connect to filter node)
## - Test/default to cluster node connection (diff protocol version)
## - Redirect logs to separate file
## - Expose basic publish/subscribe etc commands with /syntax
## - Show part of peerid to know who sent message
## - Deal with protobuf messages (e.g. other chat protocol, or encrypted)

View File

@ -0,0 +1,830 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": 2,
"links": [],
"panels": [
{
"datasource": null,
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 0
},
"id": 16,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"last"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "7.0.3",
"targets": [
{
"expr": "connected_peers{node=\"0\"}",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "Connected Peers #0",
"type": "gauge"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 4,
"w": 4,
"x": 6,
"y": 0
},
"id": 22,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false,
"ymax": null,
"ymin": null
},
"tableColumn": "envelopes_valid_total{instance=\"127.0.0.1:8010\", job=\"wakusim\", node=\"0\"}",
"targets": [
{
"expr": "envelopes_valid_total{node=\"0\"}",
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "Valid Envelopes #0",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"cacheTimeout": null,
"colorBackground": false,
"colorValue": false,
"colors": [
"#299c46",
"rgba(237, 129, 40, 0.89)",
"#d44a3a"
],
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"format": "none",
"gauge": {
"maxValue": 100,
"minValue": 0,
"show": false,
"thresholdLabels": false,
"thresholdMarkers": true
},
"gridPos": {
"h": 4,
"w": 4,
"x": 10,
"y": 0
},
"id": 20,
"interval": null,
"links": [],
"mappingType": 1,
"mappingTypes": [
{
"name": "value to text",
"value": 1
},
{
"name": "range to text",
"value": 2
}
],
"maxDataPoints": 100,
"nullPointMode": "connected",
"nullText": null,
"pluginVersion": "6.4.5",
"postfix": "",
"postfixFontSize": "50%",
"prefix": "",
"prefixFontSize": "50%",
"rangeMaps": [
{
"from": "null",
"text": "N/A",
"to": "null"
}
],
"sparkline": {
"fillColor": "rgba(31, 118, 189, 0.18)",
"full": false,
"lineColor": "rgb(31, 120, 193)",
"show": false,
"ymax": null,
"ymin": null
},
"tableColumn": "Dropped envelopes",
"targets": [
{
"expr": "sum(envelopes_dropped_total{node=\"0\"})",
"interval": "",
"legendFormat": "Dropped envelopes",
"refId": "A"
}
],
"thresholds": "",
"timeFrom": null,
"timeShift": null,
"title": "Dropped Envelopes #0",
"type": "singlestat",
"valueFontSize": "80%",
"valueMaps": [
{
"op": "=",
"text": "N/A",
"value": "null"
}
],
"valueName": "current"
},
{
"datasource": null,
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {},
"mappings": [],
"max": 200,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 200
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 5,
"x": 14,
"y": 0
},
"id": 14,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "7.0.3",
"targets": [
{
"expr": "rate(process_cpu_seconds_total{node=\"0\"}[5s]) * 100",
"legendFormat": "CPU Usage",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "CPU Usage #0",
"type": "gauge"
},
{
"datasource": null,
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {},
"mappings": [],
"max": 2147483648,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 2147483648
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 5,
"x": 19,
"y": 0
},
"id": 18,
"options": {
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"showThresholdLabels": false,
"showThresholdMarkers": true
},
"pluginVersion": "7.0.3",
"targets": [
{
"expr": "process_resident_memory_bytes{node=\"0\"}",
"refId": "A"
}
],
"timeFrom": null,
"timeShift": null,
"title": "RSS Memory #0",
"type": "gauge"
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 4
},
"hiddenSeries": false,
"id": 6,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pluginVersion": "6.4.5",
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "envelopes_valid_total{node=\"0\"}",
"hide": false,
"instant": false,
"interval": "",
"legendFormat": "Valid",
"refId": "A"
},
{
"expr": "envelopes_dropped_total{node=\"0\"}",
"hide": false,
"instant": false,
"interval": "",
"legendFormat": "Dropped {{reason}}",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Waku Envelopes #0",
"tooltip": {
"shared": true,
"sort": 1,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 4
},
"hiddenSeries": false,
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [
{
"alias": "RSS Memory",
"yaxis": 2
}
],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "connected_peers{node=\"0\"}",
"intervalFactor": 1,
"legendFormat": "Connected Peers",
"refId": "A"
},
{
"expr": "process_resident_memory_bytes{node=\"0\"}",
"interval": "",
"intervalFactor": 1,
"legendFormat": "RSS Memory",
"refId": "B"
},
{
"expr": "rate(process_cpu_seconds_total{node=\"0\"}[15s]) * 100",
"legendFormat": "CPU usage %",
"refId": "C"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Waku Node #0",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 13
},
"hiddenSeries": false,
"id": 8,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "process_max_fds{node=\"0\"}",
"legendFormat": "Maximum file descriptors",
"refId": "A"
},
{
"expr": "process_open_fds{node=\"0\"}",
"legendFormat": "Open file descriptors",
"refId": "B"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "File Descriptors #0",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": null,
"fieldConfig": {
"defaults": {
"custom": {}
},
"overrides": []
},
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 13
},
"hiddenSeries": false,
"id": 4,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "nim_gc_mem_bytes{node=\"0\"}",
"legendFormat": "Nim GC total memory",
"refId": "A"
},
{
"expr": "nim_gc_mem_occupied_bytes{node=\"0\"}",
"legendFormat": "Nim GC used memory",
"refId": "B"
},
{
"expr": "process_resident_memory_bytes{node=\"0\"}",
"legendFormat": "RSS memory",
"refId": "C"
},
{
"expr": "process_virtual_memory_bytes{node=\"0\"}",
"legendFormat": "Virtual memory",
"refId": "D"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Memory Usage #0",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 25,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-30m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
]
},
"timezone": "",
"title": "Waku Node2",
"uid": "K7Z6IoBZk",
"version": 5
}

8
tests/all_tests_v1.nim Normal file
View File

@ -0,0 +1,8 @@
import
# Waku v1 tests
./v1/test_waku_connect,
./v1/test_waku_config,
./v1/test_waku_bridge,
./v1/test_waku_mail,
./v1/test_rpc_waku

14
tests/all_tests_v2.nim Normal file
View File

@ -0,0 +1,14 @@
import
# Waku v2 tests
# TODO: enable this when it is altered into a proper waku relay test
# ./v2/test_waku,
./v2/test_wakunode,
./v2/test_waku_store,
./v2/test_waku_filter,
./v2/test_waku_pagination,
./v2/test_waku_payload,
./v2/test_rpc_waku,
./v2/test_waku_swap,
./v2/test_message_store,
./v2/test_jsonrpc_waku,
./v2/test_web3 # to be removed when rln-relay tests are added

52
tests/test_helpers.nim Normal file
View File

@ -0,0 +1,52 @@
import
unittest, chronos, bearssl,
eth/[keys, p2p]
import libp2p/crypto/crypto
var nextPort = 30303
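# Each test node listens on its own 127.0.0.1 port, starting at 30303 and
# incremented per node by setupTestNode below.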
proc localAddress*(port: int): Address =
let port = Port(port)
result = Address(udpPort: port, tcpPort: port,
ip: parseIpAddress("127.0.0.1"))
proc setupTestNode*(
rng: ref BrHmacDrbgContext,
capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode =
let keys1 = keys.KeyPair.random(rng[])
result = newEthereumNode(keys1, localAddress(nextPort), 1, nil,
addAllCapabilities = false, rng = rng)
nextPort.inc
for capability in capabilities:
result.addCapability capability
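# asyncTest wraps the test body in an async proc and drives it to completion
# with waitFor, so `await` can be used directly inside the test.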
template asyncTest*(name, body: untyped) =
test name:
proc scenario {.async.} = body
waitFor scenario()
template procSuite*(name, body: untyped) =
proc suitePayload =
suite name:
body
suitePayload()
# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28
type RngWrap = object
rng: ref BrHmacDrbgContext
var rngVar: RngWrap
proc getRng(): ref BrHmacDrbgContext =
# TODO if `rngVar` is a threadvar like it should be, there are random and
# spurious compile failures on mac - this is not gcsafe but for the
# purpose of the tests, it's ok as long as we only use a single thread
{.gcsafe.}:
if rngVar.rng.isNil:
rngVar.rng = crypto.newRng()
rngVar.rng
template rng*(): ref BrHmacDrbgContext =
getRng()

240
tests/v1/test_rpc_waku.nim Normal file
View File

@ -0,0 +1,240 @@
{.used.}
import
std/[unittest, options, os, strutils],
stew/byteutils, json_rpc/[rpcserver, rpcclient],
eth/common as eth_common, eth/[rlp, keys, p2p],
../../waku/v1/protocol/waku_protocol,
../../waku/v1/node/rpc/[hexstrings, rpc_types, waku, key_storage]
template sourceDir*: string = currentSourcePath.rsplit(DirSep, 1)[0]
## Generate client convenience marshalling wrappers from forward declarations
## For testing, wakucallsigs needs to be kept in sync with ../../waku/v1/node/rpc/waku
const sigPath = sourceDir / ParDir / ParDir / "waku" / "v1" / "node" / "rpc" / "wakucallsigs.nim"
createRpcSigs(RpcSocketClient, sigPath)
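# Illustrative note: createRpcSigs turns each forward declaration in the
# signatures file (e.g. something along the lines of `proc waku_version(): string`)
# into a typed client wrapper such as the client.waku_version() call used below.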
proc setupNode(capabilities: varargs[ProtocolInfo, `protocolInfo`],
rng: ref BrHmacDrbgContext): EthereumNode =
let
keypair = KeyPair.random(rng[])
srvAddress = Address(ip: parseIpAddress("0.0.0.0"), tcpPort: Port(30303),
udpPort: Port(30303))
result = newEthereumNode(keypair, srvAddress, 1, nil, "waku test rpc",
addAllCapabilities = false, rng = rng)
for capability in capabilities:
result.addCapability capability
proc doTests {.async.} =
let rng = keys.newRng()
var ethNode = setupNode(Waku, rng)
# Create Ethereum RPCs
let rpcPort = 8545
var
rpcServer = newRpcSocketServer(["localhost:" & $rpcPort])
client = newRpcSocketClient()
let keys = newKeyStorage()
setupWakuRPC(ethNode, keys, rpcServer, rng)
# Begin tests
rpcServer.start()
await client.connect("localhost", Port(rpcPort))
suite "Waku Remote Procedure Calls":
test "waku_version":
check await(client.waku_version()) == wakuVersionStr
test "waku_info":
let info = await client.waku_info()
check info.maxMessageSize == defaultMaxMsgSize
test "waku_setMaxMessageSize":
let testValue = 1024'u64
check await(client.waku_setMaxMessageSize(testValue)) == true
var info = await client.waku_info()
check info.maxMessageSize == testValue
expect ValueError:
discard await(client.waku_setMaxMessageSize(defaultMaxMsgSize + 1))
info = await client.waku_info()
check info.maxMessageSize == testValue
test "waku_setMinPoW":
let testValue = 0.0001
check await(client.waku_setMinPoW(testValue)) == true
let info = await client.waku_info()
check info.minPow == testValue
# test "waku_markTrustedPeer":
# TODO: need to connect a peer to test
test "waku asymKey tests":
let keyID = await client.waku_newKeyPair()
check:
await(client.waku_hasKeyPair(keyID)) == true
await(client.waku_deleteKeyPair(keyID)) == true
await(client.waku_hasKeyPair(keyID)) == false
expect ValueError:
discard await(client.waku_deleteKeyPair(keyID))
let privkey = "0x5dc5381cae54ba3174dc0d46040fe11614d0cc94d41185922585198b4fcef9d3"
let pubkey = "0x04e5fd642a0f630bbb1e4cd7df629d7b8b019457a9a74f983c0484a045cebb176def86a54185b50bbba6bbf97779173695e92835d63109c23471e6da382f922fdb"
let keyID2 = await client.waku_addPrivateKey(privkey)
check:
await(client.waku_getPublicKey(keyID2)) == pubkey.toPublicKey
await(client.waku_getPrivateKey(keyID2)).toRaw() == privkey.toPrivateKey.toRaw()
await(client.waku_hasKeyPair(keyID2)) == true
await(client.waku_deleteKeyPair(keyID2)) == true
await(client.waku_hasKeyPair(keyID2)) == false
expect ValueError:
discard await(client.waku_deleteKeyPair(keyID2))
test "waku symKey tests":
let keyID = await client.waku_newSymKey()
check:
await(client.waku_hasSymKey(keyID)) == true
await(client.waku_deleteSymKey(keyID)) == true
await(client.waku_hasSymKey(keyID)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID))
let symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
let keyID2 = await client.waku_addSymKey(symKey)
check:
await(client.waku_getSymKey(keyID2)) == symKey.toSymKey
await(client.waku_hasSymKey(keyID2)) == true
await(client.waku_deleteSymKey(keyID2)) == true
await(client.waku_hasSymKey(keyID2)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID2))
let keyID3 = await client.waku_generateSymKeyFromPassword("password")
let keyID4 = await client.waku_generateSymKeyFromPassword("password")
let keyID5 = await client.waku_generateSymKeyFromPassword("nimbus!")
check:
await(client.waku_getSymKey(keyID3)) ==
await(client.waku_getSymKey(keyID4))
await(client.waku_getSymKey(keyID3)) !=
await(client.waku_getSymKey(keyID5))
await(client.waku_hasSymKey(keyID3)) == true
await(client.waku_deleteSymKey(keyID3)) == true
await(client.waku_hasSymKey(keyID3)) == false
expect ValueError:
discard await(client.waku_deleteSymKey(keyID3))
# Some defaults for the filter & post tests
let
ttl = 30'u64
topicStr = "0x12345678"
payload = "0x45879632"
# A very low target and long time so we are sure the test never fails
# because of this
powTarget = 0.001
powTime = 1.0
test "waku filter create and delete":
let
topic = topicStr.toTopic()
symKeyID = await client.waku_newSymKey()
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(@[topic]))
filterID = await client.waku_newMessageFilter(options)
check:
filterID.string.isValidIdentifier
await(client.waku_deleteMessageFilter(filterID)) == true
expect ValueError:
discard await(client.waku_deleteMessageFilter(filterID))
test "waku symKey post and filter loop":
let
topic = topicStr.toTopic()
symKeyID = await client.waku_newSymKey()
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(@[topic]))
filterID = await client.waku_newMessageFilter(options)
message = WakuPostMessage(symKeyID: some(symKeyID),
ttl: ttl,
topic: some(topic),
payload: payload.HexDataStr,
powTime: powTime,
powTarget: powTarget)
check:
await(client.waku_setMinPoW(powTarget)) == true
await(client.waku_post(message)) == true
let messages = await client.waku_getFilterMessages(filterID)
check:
messages.len == 1
messages[0].sig.isNone()
messages[0].recipientPublicKey.isNone()
messages[0].ttl == ttl
messages[0].topic == topic
messages[0].payload == hexToSeqByte(payload)
messages[0].padding.len > 0
messages[0].pow >= powTarget
await(client.waku_deleteMessageFilter(filterID)) == true
test "waku asymKey post and filter loop":
let
topic = topicStr.toTopic()
privateKeyID = await client.waku_newKeyPair()
options = WakuFilterOptions(privateKeyID: some(privateKeyID))
filterID = await client.waku_newMessageFilter(options)
pubKey = await client.waku_getPublicKey(privateKeyID)
message = WakuPostMessage(pubKey: some(pubKey),
ttl: ttl,
topic: some(topic),
payload: payload.HexDataStr,
powTime: powTime,
powTarget: powTarget)
check:
await(client.waku_setMinPoW(powTarget)) == true
await(client.waku_post(message)) == true
let messages = await client.waku_getFilterMessages(filterID)
check:
messages.len == 1
messages[0].sig.isNone()
messages[0].recipientPublicKey.get() == pubKey
messages[0].ttl == ttl
messages[0].topic == topic
messages[0].payload == hexToSeqByte(payload)
messages[0].padding.len > 0
messages[0].pow >= powTarget
await(client.waku_deleteMessageFilter(filterID)) == true
test "waku signature in post and filter loop":
let
topic = topicStr.toTopic()
symKeyID = await client.waku_newSymKey()
privateKeyID = await client.waku_newKeyPair()
pubKey = await client.waku_getPublicKey(privateKeyID)
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(@[topic]),
sig: some(pubKey))
filterID = await client.waku_newMessageFilter(options)
message = WakuPostMessage(symKeyID: some(symKeyID),
sig: some(privateKeyID),
ttl: ttl,
topic: some(topic),
payload: payload.HexDataStr,
powTime: powTime,
powTarget: powTarget)
check:
await(client.waku_setMinPoW(powTarget)) == true
await(client.waku_post(message)) == true
let messages = await client.waku_getFilterMessages(filterID)
check:
messages.len == 1
messages[0].sig.get() == pubKey
messages[0].recipientPublicKey.isNone()
messages[0].ttl == ttl
messages[0].topic == topic
messages[0].payload == hexToSeqByte(payload)
messages[0].padding.len > 0
messages[0].pow >= powTarget
await(client.waku_deleteMessageFilter(filterID)) == true
rpcServer.stop()
rpcServer.close()
waitFor doTests()

View File

@ -0,0 +1,98 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, unittest, tables],
chronos, eth/p2p, eth/p2p/peer_pool,
eth/p2p/rlpx_protocols/whisper_protocol as whisper,
../../waku/v1/protocol/waku_protocol as waku,
../../waku/v1/protocol/waku_bridge,
../test_helpers
let safeTTL = 5'u32
let waitInterval = waku.messageInterval + 150.milliseconds
procSuite "Waku - Whisper bridge tests":
let rng = newRng()
# Waku Whisper node has both capabilities, listens to Whisper and Waku and
# relays traffic between the two.
var
nodeWakuWhisper = setupTestNode(rng, Whisper, Waku) # This will be the bridge
nodeWhisper = setupTestNode(rng, Whisper)
nodeWaku = setupTestNode(rng, Waku)
nodeWakuWhisper.startListening()
let bridgeNode = newNode(nodeWakuWhisper.toENode())
nodeWakuWhisper.shareMessageQueue()
waitFor nodeWhisper.peerPool.connectToNode(bridgeNode)
waitFor nodeWaku.peerPool.connectToNode(bridgeNode)
asyncTest "WakuWhisper and Whisper peers connected":
check:
nodeWhisper.peerPool.connectedNodes.len() == 1
nodeWaku.peerPool.connectedNodes.len() == 1
asyncTest "Whisper - Waku communcation via bridge":
# Topic the Whisper node subscribes to and the Waku node posts to
let topic1 = [byte 0x12, 0, 0, 0]
# Topic the Waku node subscribes to and the Whisper node posts to
let topic2 = [byte 0x34, 0, 0, 0]
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: whisper.ReceivedMessage) =
check msg.decoded.payload == payloads[0]
futures[0].complete(1)
proc handler2(msg: waku.ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
var filter1 = whisper.subscribeFilter(nodeWhisper,
whisper.initFilter(topics = @[topic1]), handler1)
var filter2 = waku.subscribeFilter(nodeWaku,
waku.initFilter(topics = @[topic2]), handler2)
check:
# Message should also end up in the Whisper node's queue via the bridge
waku.postMessage(nodeWaku, ttl = safeTTL + 1, topic = topic1,
payload = payloads[0]) == true
# Message should also end up in the Waku node's queue via the bridge
whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2,
payload = payloads[1]) == true
nodeWhisper.protocolState(Whisper).queue.items.len == 1
nodeWaku.protocolState(Waku).queue.items.len == 1
# waitInterval*2 as messages have to pass the bridge also (2 hops)
await allFutures(futures).withTimeout(waitInterval*2)
# Relay can receive Whisper & Waku messages
nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2
nodeWakuWhisper.protocolState(Waku).queue.items.len == 2
# Whisper node can receive Waku messages (via bridge)
nodeWhisper.protocolState(Whisper).queue.items.len == 2
# Waku node can receive Whisper messages (via bridge)
nodeWaku.protocolState(Waku).queue.items.len == 2
whisper.unsubscribeFilter(nodeWhisper, filter1) == true
waku.unsubscribeFilter(nodeWaku, filter2) == true
# XXX: This reads a bit weird, but eh
waku.resetMessageQueue(nodeWaku)
whisper.resetMessageQueue(nodeWhisper)
# shared queue so Waku and Whisper should be set to 0
waku.resetMessageQueue(nodeWakuWhisper)
check:
nodeWhisper.protocolState(Whisper).queue.items.len == 0
nodeWaku.protocolState(Waku).queue.items.len == 0
nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0
nodeWakuWhisper.protocolState(Waku).queue.items.len == 0

View File

@ -0,0 +1,65 @@
#
# Waku
# (c) Copyright 2020
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, options, unittest, times],
../../waku/v1/protocol/waku_protocol
suite "Waku envelope validation":
test "should validate and allow envelope according to config":
let ttl = 1'u32
let topic = [byte 1, 2, 3, 4]
let config = WakuConfig(powRequirement: 0, bloom: some(topic.topicBloom()),
isLightNode: false, maxMsgSize: defaultMaxMsgSize)
let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
data: repeat(byte 9, 256), nonce: 0)
check env.valid()
let msg = initMessage(env)
check msg.allowed(config)
test "should invalidate envelope due to ttl 0":
let ttl = 0'u32
let topic = [byte 1, 2, 3, 4]
let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic,
data: repeat(byte 9, 256), nonce: 0)
check env.valid() == false
test "should invalidate envelope due to expired":
let ttl = 1'u32
let topic = [byte 1, 2, 3, 4]
let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic,
data: repeat(byte 9, 256), nonce: 0)
check env.valid() == false
test "should invalidate envelope due to in the future":
let ttl = 1'u32
let topic = [byte 1, 2, 3, 4]
# there is currently a 2 second tolerance, hence the + 3
let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl,
topic: topic, data: repeat(byte 9, 256), nonce: 0)
check env.valid() == false
test "should not allow envelope due to bloom filter":
let topic = [byte 1, 2, 3, 4]
let wrongTopic = [byte 9, 8, 7, 6]
let config = WakuConfig(powRequirement: 0,
bloom: some(wrongTopic.topicBloom()),
isLightNode: false, maxMsgSize: defaultMaxMsgSize)
let env = Envelope(expiry:100000 , ttl: 30, topic: topic,
data: repeat(byte 9, 256), nonce: 0)
let msg = initMessage(env)
check msg.allowed(config) == false

View File

@ -0,0 +1,560 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import
std/[sequtils, tables, unittest],
chronos, eth/[keys, p2p], eth/p2p/peer_pool,
../../waku/v1/protocol/waku_protocol,
../test_helpers
const
safeTTL = 5'u32
waitInterval = messageInterval + 150.milliseconds
conditionTimeoutMs = 3000.milliseconds
proc resetMessageQueues(nodes: varargs[EthereumNode]) =
for node in nodes:
node.resetMessageQueue()
# Check a condition repeatedly until it is true, or return a future containing
# false if the timeout expires first.
proc eventually(timeout: Duration, condition: proc(): bool {.gcsafe.}):
Future[bool] =
let wrappedCondition = proc(): Future[bool] {.async.} =
let f = newFuture[bool]()
while not condition():
await sleepAsync(100.milliseconds)
f.complete(true)
return await f
return withTimeout(wrappedCondition(), timeout)
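# Used below as e.g.:
#   require await eventually(conditionTimeoutMs, proc(): bool = node2.peerPool.len == 1)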
procSuite "Waku connections":
let rng = keys.newRng()
asyncTest "Waku connections":
var
n1 = setupTestNode(rng, Waku)
n2 = setupTestNode(rng, Waku)
n3 = setupTestNode(rng, Waku)
n4 = setupTestNode(rng, Waku)
var topics: seq[Topic]
n1.protocolState(Waku).config.topics = some(topics)
n2.protocolState(Waku).config.topics = some(topics)
n3.protocolState(Waku).config.topics = none(seq[Topic])
n4.protocolState(Waku).config.topics = none(seq[Topic])
n1.startListening()
n3.startListening()
let
p1 = await n2.rlpxConnect(newNode(n1.toENode()))
p2 = await n2.rlpxConnect(newNode(n3.toENode()))
p3 = await n4.rlpxConnect(newNode(n3.toENode()))
check:
p1.isNil
p2.isNil == false
p3.isNil == false
asyncTest "Filters with encryption and signing":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let encryptKeyPair = KeyPair.random(rng[])
let signKeyPair = KeyPair.random(rng[])
var symKey: SymKey
let topic = [byte 0x12, 0, 0, 0]
var filters: seq[string] = @[]
var payloads = [repeat(byte 1, 10), repeat(byte 2, 10),
repeat(byte 3, 10), repeat(byte 4, 10)]
var futures = [newFuture[int](), newFuture[int](),
newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[0] or
msg.decoded.payload == payloads[1]
count += 1
if count == 2: futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
proc handler3(msg: ReceivedMessage) =
var count {.global.}: int
check msg.decoded.payload == payloads[2] or
msg.decoded.payload == payloads[3]
count += 1
if count == 2: futures[2].complete(1)
proc handler4(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[3]
futures[3].complete(1)
# Filters
# filter for encrypted asym
filters.add(node1.subscribeFilter(initFilter(
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler1))
# filter for encrypted asym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
privateKey = some(encryptKeyPair.seckey), topics = @[topic]), handler2))
# filter for encrypted sym
filters.add(node1.subscribeFilter(initFilter(symKey = some(symKey),
topics = @[topic]), handler3))
# filter for encrypted sym + signed
filters.add(node1.subscribeFilter(initFilter(some(signKeyPair.pubkey),
symKey = some(symKey), topics = @[topic]), handler4))
# Messages
check:
# encrypted asym
node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL,
topic = topic, payload = payloads[0]) == true
# encrypted asym + signed
node2.postMessage(some(encryptKeyPair.pubkey),
src = some(signKeyPair.seckey), ttl = safeTTL,
topic = topic, payload = payloads[1]) == true
# encrypted sym
node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic,
payload = payloads[2]) == true
# encrypted sym + signed
node2.postMessage(symKey = some(symKey),
src = some(signKeyPair.seckey),
ttl = safeTTL, topic = topic,
payload = payloads[3]) == true
node2.protocolState(Waku).queue.items.len == 4
check:
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 4
for filter in filters:
check node1.unsubscribeFilter(filter) == true
asyncTest "Filters with topics":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic1 = [byte 0x12, 0, 0, 0]
let topic2 = [byte 0x34, 0, 0, 0]
var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)]
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[0]
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payloads[1]
futures[1].complete(1)
var filter1 = node1.subscribeFilter(initFilter(topics = @[topic1]), handler1)
var filter2 = node1.subscribeFilter(initFilter(topics = @[topic2]), handler2)
check:
node2.postMessage(ttl = safeTTL + 1, topic = topic1,
payload = payloads[0]) == true
node2.postMessage(ttl = safeTTL, topic = topic2,
payload = payloads[1]) == true
node2.protocolState(Waku).queue.items.len == 2
await allFutures(futures).withTimeout(waitInterval)
node1.protocolState(Waku).queue.items.len == 2
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with PoW":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0x12, 0, 0, 0]
var payload = repeat(byte 0, 10)
var futures = [newFuture[int](), newFuture[int]()]
proc handler1(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[0].complete(1)
proc handler2(msg: ReceivedMessage) =
check msg.decoded.payload == payload
futures[1].complete(1)
var filter1 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 0), handler1)
var filter2 = node1.subscribeFilter(
initFilter(topics = @[topic], powReq = 1_000_000), handler2)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
(await futures[0].withTimeout(waitInterval)) == true
(await futures[1].withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter1) == true
node1.unsubscribeFilter(filter2) == true
asyncTest "Filters with queues":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
for i in countdown(10, 1):
check node2.postMessage(ttl = safeTTL, topic = topic,
payload = payload) == true
await sleepAsync(waitInterval)
check:
node1.getFilterMessages(filter).len() == 10
node1.getFilterMessages(filter).len() == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Local filter notify":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var filter = node1.subscribeFilter(initFilter(topics = @[topic]))
check:
node1.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 4, 10)) == true
node1.getFilterMessages(filter).len() == 1
node1.unsubscribeFilter(filter) == true
asyncTest "Bloomfilter blocking":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let sendTopic1 = [byte 0x12, 0, 0, 0]
let sendTopic2 = [byte 0x34, 0, 0, 0]
let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]]
let payload = repeat(byte 0, 10)
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
var filter = node1.subscribeFilter(
initFilter(topics = filterTopics), handler)
await node1.setBloomFilter(node1.filtersToBloom())
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic1,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
(await f.withTimeout(waitInterval)) == false
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
f = newFuture[int]()
check:
node2.postMessage(ttl = safeTTL, topic = sendTopic2,
payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 1
node1.unsubscribeFilter(filter) == true
await node1.setBloomFilter(fullBloom())
asyncTest "PoW blocking":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
await node1.setPowRequirement(1_000_000)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 0
resetMessageQueues(node1, node2)
await node1.setPowRequirement(0.0)
check:
node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true
node2.protocolState(Waku).queue.items.len == 1
await sleepAsync(waitInterval)
check:
node1.protocolState(Waku).queue.items.len == 1
asyncTest "Queue pruning":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
let payload = repeat(byte 0, 10)
# We need a minimum TTL of 2 as when set to 1 there is a small chance that
# it is already expired after messageInterval due to rounding down of float
# to uint32 in postMessage()
let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire
for i in countdown(10, 1):
check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload)
check node2.protocolState(Waku).queue.items.len == 10
await sleepAsync(waitInterval)
check node1.protocolState(Waku).queue.items.len == 10
await sleepAsync(milliseconds((lowerTTL+1)*1000))
check node1.protocolState(Waku).queue.items.len == 0
check node2.protocolState(Waku).queue.items.len == 0
asyncTest "P2P post":
var node1 = setupTestNode(rng, Waku)
var node2 = setupTestNode(rng, Waku)
node2.startListening()
waitFor node1.peerPool.connectToNode(newNode(node2.toENode()))
let topic = [byte 0, 0, 0, 0]
var f: Future[int] = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == repeat(byte 4, 10)
f.complete(1)
var filter = node1.subscribeFilter(initFilter(topics = @[topic],
allowP2P = true), handler)
# Need to be sure that node1 is added to the peer pool of node2, as
# postMessage with a target peer will iterate over the peers
require await eventually(conditionTimeoutMs,
proc(): bool = node2.peerPool.len == 1)
check:
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true
node2.postMessage(ttl = 10, topic = topic,
payload = repeat(byte 4, 10),
targetPeer = some(toNodeId(node1.keys.pubkey))) == true
await f.withTimeout(waitInterval)
f.read() == 1
node1.protocolState(Waku).queue.items.len == 0
node2.protocolState(Waku).queue.items.len == 0
node1.unsubscribeFilter(filter) == true
asyncTest "Light node posting":
var ln = setupTestNode(rng, Waku)
await ln.setLightNode(true)
var fn = setupTestNode(rng, Waku)
fn.startListening()
await ln.peerPool.connectToNode(newNode(fn.toENode()))
let topic = [byte 0, 0, 0, 0]
check:
ln.peerPool.connectedNodes.len() == 1
# normal post
ln.postMessage(ttl = safeTTL, topic = topic,
payload = repeat(byte 0, 10)) == true
ln.protocolState(Waku).queue.items.len == 1
# TODO: add test on message relaying
asyncTest "Connect two light nodes":
var ln1 = setupTestNode(rng, Waku)
var ln2 = setupTestNode(rng, Waku)
await ln1.setLightNode(true)
await ln2.setLightNode(true)
ln2.startListening()
let peer = await ln1.rlpxConnect(newNode(ln2.toENode()))
check peer.isNil == true
asyncTest "Waku set-topic-interest":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# Set one topic so we are not considered a full node
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update topic interest
check:
await setTopicInterest(wakuTopicNode, @[topic1, topic2])
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2
asyncTest "Waku set-minimum-pow":
var
wakuPowNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
wakuNode.startListening()
await wakuPowNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update minimum pow
await setPowRequirement(wakuPowNode, 1.0)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check powRequirement is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).powRequirement == 1.0
asyncTest "Waku set-light-node":
var
wakuLightNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
wakuNode.startListening()
await wakuLightNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
# Update the light node setting
await setLightNode(wakuLightNode, true)
await sleepAsync(waitInterval)
check:
wakuNode.peerPool.len == 1
# check lightNode is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).isLightNode
asyncTest "Waku set-bloom-filter":
var
wakuBloomNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
bloom = fullBloom()
topics = @[[byte 0xDA, 0xDA, 0xDA, 0xAA]]
# Set topic interest
discard await wakuBloomNode.setTopicInterest(topics)
wakuBloomNode.startListening()
await wakuNode.peerPool.connectToNode(newNode(wakuBloomNode.toENode()))
# Sanity check
check:
wakuNode.peerPool.len == 1
# check bloom filter is updated
for peer in wakuNode.peerPool.peers:
check:
peer.state(Waku).bloom == bloom
peer.state(Waku).topics == some(topics)
let hasBloomNodeConnectedCondition = proc(): bool =
wakuBloomNode.peerPool.len == 1
# wait for the peer to be connected on the other side
let hasBloomNodeConnected =
await eventually(conditionTimeoutMs, hasBloomNodeConnectedCondition)
# check bloom filter is updated
check:
hasBloomNodeConnected
# disable one bit in the bloom filter
bloom[0] = 0x0
# and set it
await setBloomFilter(wakuBloomNode, bloom)
let bloomFilterUpdatedCondition = proc(): bool =
for peer in wakuNode.peerPool.peers:
return peer.state(Waku).bloom == bloom and
peer.state(Waku).topics == none(seq[Topic])
let bloomFilterUpdated =
await eventually(conditionTimeoutMs, bloomFilterUpdatedCondition)
# check bloom filter is updated
check:
bloomFilterUpdated
asyncTest "Waku topic-interest":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
wrongTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = wrongTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await eventually(conditionTimeoutMs,
proc (): bool = wakuTopicNode.protocolState(Waku).queue.items.len == 2)
asyncTest "Waku topic-interest versus bloom filter":
var
wakuTopicNode = setupTestNode(rng, Waku)
wakuNode = setupTestNode(rng, Waku)
let
topic1 = [byte 0xDA, 0xDA, 0xDA, 0xAA]
topic2 = [byte 0xD0, 0xD0, 0xD0, 0x00]
bloomTopic = [byte 0x4B, 0x1D, 0x4B, 0x1D]
# It was checked that the topics don't trigger false positives on the bloom.
wakuTopicNode.protocolState(Waku).config.topics = some(@[topic1, topic2])
wakuTopicNode.protocolState(Waku).config.bloom = some(toBloom([bloomTopic]))
wakuNode.startListening()
await wakuTopicNode.peerPool.connectToNode(newNode(wakuNode.toENode()))
let payload = repeat(byte 0, 10)
check:
wakuNode.postMessage(ttl = safeTTL, topic = topic1, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = topic2, payload = payload)
wakuNode.postMessage(ttl = safeTTL, topic = bloomTopic, payload = payload)
wakuNode.protocolState(Waku).queue.items.len == 3
await sleepAsync(waitInterval)
check:
wakuTopicNode.protocolState(Waku).queue.items.len == 2

119
tests/v1/test_waku_mail.nim Normal file
View File

@ -0,0 +1,119 @@
{.used.}
import
std/[unittest, tables, sequtils, times],
chronos, eth/[p2p, async_utils], eth/p2p/peer_pool,
../../waku/v1/protocol/[waku_protocol, waku_mail],
../test_helpers
const
transmissionTimeout = chronos.milliseconds(100)
proc waitForConnected(node: EthereumNode) {.async.} =
while node.peerPool.connectedNodes.len == 0:
await sleepAsync(chronos.milliseconds(1))
procSuite "Waku Mail Client":
let rng = newRng()
var client = setupTestNode(rng, Waku)
var simpleServer = setupTestNode(rng, Waku)
simpleServer.startListening()
let simpleServerNode = newNode(simpleServer.toENode())
let clientNode = newNode(client.toENode())
waitFor client.peerPool.connectToNode(simpleServerNode)
require:
waitFor simpleServer.waitForConnected().withTimeout(transmissionTimeout)
asyncTest "Two peers connected":
check:
client.peerPool.connectedNodes.len() == 1
simpleServer.peerPool.connectedNodes.len() == 1
asyncTest "Mail Request and Request Complete":
let
topic = [byte 0, 0, 0, 0]
bloom = toBloom(@[topic])
lower = 0'u32
upper = epochTime().uint32
limit = 100'u32
request = MailRequest(lower: lower, upper: upper, bloom: @bloom,
limit: limit)
var symKey: SymKey
check client.setPeerTrusted(simpleServerNode.id)
var cursorFut = client.requestMail(simpleServerNode.id, request, symKey, 1)
# Simple mailserver part
let peer = simpleServer.peerPool.connectedNodes[clientNode]
var f = peer.nextMsg(Waku.p2pRequest)
require await f.withTimeout(transmissionTimeout)
let response = f.read()
let decoded = decode(response.envelope.data, symKey = some(symKey))
require decoded.isSome()
var rlp = rlpFromBytes(decoded.get().payload)
let output = rlp.read(MailRequest)
check:
output.lower == lower
output.upper == upper
output.bloom == bloom
output.limit == limit
var dummy: Hash
await peer.p2pRequestComplete(dummy, dummy, @[])
check await cursorFut.withTimeout(transmissionTimeout)
asyncTest "Mail Send":
let topic = [byte 0x12, 0x34, 0x56, 0x78]
let payload = repeat(byte 0, 10)
var f = newFuture[int]()
proc handler(msg: ReceivedMessage) =
check msg.decoded.payload == payload
f.complete(1)
let filter = subscribeFilter(client,
initFilter(topics = @[topic], allowP2P = true), handler)
check:
client.setPeerTrusted(simpleServerNode.id)
# ttl 0 to show that ttl should be ignored
# TODO: perhaps not the best way to test this; it means no PoW calculation
# may be done, and it is unclear whether that is OK
simpleServer.postMessage(ttl = 0, topic = topic, payload = payload,
targetPeer = some(clientNode.id))
await f.withTimeout(transmissionTimeout)
client.unsubscribeFilter(filter)
asyncTest "Multiple Client Request and Complete":
var count = 5
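# Mock mailserver behaviour: reply to each request with an empty p2pMessage
# batch and a decreasing cursor, sending an empty cursor once count reaches 0.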
proc customHandler(peer: Peer, envelope: Envelope)=
var envelopes: seq[Envelope]
traceAsyncErrors peer.p2pMessage(envelopes)
var cursor: seq[byte]
count = count - 1
if count == 0:
cursor = @[]
else:
cursor = @[byte count]
var dummy: Hash
traceAsyncErrors peer.p2pRequestComplete(dummy, dummy, cursor)
simpleServer.registerP2PRequestHandler(customHandler)
check client.setPeerTrusted(simpleServerNode.id)
var request: MailRequest
var symKey: SymKey
let cursor =
await client.requestMail(simpleServerNode.id, request, symKey, 5)
require cursor.isSome()
check:
cursor.get().len == 0
count == 0
# TODO: Also check for received envelopes.

View File

@ -0,0 +1,413 @@
import
std/[unittest, options, sets, tables, os, strutils, sequtils],
stew/shims/net as stewNet,
json_rpc/[rpcserver, rpcclient],
libp2p/[standard_setup, switch, multiaddress],
libp2p/protobuf/minprotobuf,
libp2p/stream/[bufferstream, connection],
libp2p/crypto/crypto,
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/rpc/message,
../../waku/v2/waku_types,
../../waku/v2/node/wakunode2,
../../waku/v2/node/jsonrpc/[jsonrpc_types,store_api,relay_api,debug_api,filter_api,admin_api],
../../waku/v2/protocol/message_notifier,
../../waku/v2/protocol/waku_filter,
../../waku/v2/protocol/waku_store/waku_store,
../../waku/v2/protocol/waku_swap/waku_swap,
../test_helpers
template sourceDir*: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigPath = sourceDir / ParDir / ParDir / "waku" / "v2" / "node" / "jsonrpc" / "jsonrpc_callsigs.nim"
createRpcSigs(RpcHttpClient, sigPath)
procSuite "Waku v2 JSON-RPC API":
const defaultTopic = "/waku/2/default-waku/proto"
const testCodec = "/waku/2/default-waku/codec"
let
rng = crypto.newRng()
privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet()
bindIp = ValidIpAddress.init("0.0.0.0")
extIp = ValidIpAddress.init("127.0.0.1")
port = Port(9000)
node = WakuNode.init(privkey, bindIp, port, some(extIp), some(port))
asyncTest "Debug API: get node info":
waitFor node.start()
waitFor node.mountRelay()
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installDebugApiHandlers(node, server)
server.start()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
let response = await client.get_waku_v2_debug_v1_info()
check:
response.listenStr == $node.peerInfo.addrs[0] & "/p2p/" & $node.peerInfo.peerId
server.stop()
server.close()
waitFor node.stop()
asyncTest "Relay API: publish and subscribe/unsubscribe":
waitFor node.start()
waitFor node.mountRelay()
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installRelayApiHandlers(node, server)
server.start()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
check:
# At this stage the node is only subscribed to the default topic
PubSub(node.wakuRelay).topics.len == 1
# Subscribe to new topics
let newTopics = @["1","2","3"]
var response = await client.post_waku_v2_relay_v1_subscriptions(newTopics)
check:
# Node is now subscribed to default + new topics
PubSub(node.wakuRelay).topics.len == 1 + newTopics.len
response == true
# Publish a message on the default topic
response = await client.post_waku_v2_relay_v1_message(defaultTopic, WakuRelayMessage(payload: @[byte 1], contentTopic: some(ContentTopic(1))))
check:
# @TODO poll topic to verify message has been published
response == true
# Unsubscribe from new topics
response = await client.delete_waku_v2_relay_v1_subscriptions(newTopics)
check:
# Node is now unsubscribed from new topics
PubSub(node.wakuRelay).topics.len == 1
response == true
server.stop()
server.close()
waitFor node.stop()
asyncTest "Relay API: get latest messages":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, bindIp, Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, bindIp, Port(60002))
nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node3 = WakuNode.init(nodeKey3, bindIp, Port(60003), some(extIp), some(port))
pubSubTopic = "polling"
contentTopic = ContentTopic(1)
payload = @[byte 9]
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
await node1.mountRelay(@[pubSubTopic])
await node2.start()
await node2.mountRelay(@[pubSubTopic])
await node3.start()
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.peerInfo])
await node3.connectToNodes(@[node2.peerInfo])
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
# Let's connect to node 3 via the API
installRelayApiHandlers(node3, server)
server.start()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
# Now try to subscribe using API
var response = await client.post_waku_v2_relay_v1_subscriptions(@[pubSubTopic])
await sleepAsync(2000.millis)
check:
# Node is now subscribed to pubSubTopic
response == true
# Now publish a message on node1 and see if we receive it on node3
await node1.publish(pubSubTopic, message)
await sleepAsync(2000.millis)
var messages = await client.get_waku_v2_relay_v1_messages(pubSubTopic)
check:
messages.len == 1
messages[0].contentTopic == contentTopic
messages[0].payload == payload
# Ensure that read messages are cleared from cache
messages = await client.get_waku_v2_relay_v1_messages(pubSubTopic)
check:
messages.len == 0
server.stop()
server.close()
await node1.stop()
await node2.stop()
await node3.stop()
asyncTest "Store API: retrieve historical messages":
waitFor node.start()
waitFor node.mountRelay(@[defaultTopic])
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installStoreApiHandlers(node, server)
server.start()
# WakuStore setup
let
key = wakunode2.PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
node.mountStore()
let
subscription = node.wakuStore.subscription()
var listenSwitch = newStandardSwitch(some(key))
discard waitFor listenSwitch.start()
node.wakuStore.setPeer(listenSwitch.peerInfo)
listenSwitch.mount(node.wakuStore)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions[testCodec] = subscription
# Now prime it with some history before tests
var
msgList = @[WakuMessage(payload: @[byte 0], contentTopic: ContentTopic(2)),
WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 2], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 3], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 4], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 5], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 6], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 7], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 8], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 9], contentTopic: ContentTopic(2))]
for wakuMsg in msgList:
waitFor subscriptions.notify(defaultTopic, wakuMsg)
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
let response = await client.get_waku_v2_store_v1_messages(@[ContentTopic(1)], some(StorePagingOptions()))
check:
response.messages.len() == 8
response.pagingOptions.isNone
server.stop()
server.close()
waitFor node.stop()
asyncTest "Filter API: subscribe/unsubscribe":
waitFor node.start()
waitFor node.mountRelay()
node.mountFilter()
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installFilterApiHandlers(node, server)
server.start()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
check:
# Light node has not yet subscribed to any filters
node.filters.len() == 0
let contentFilters = @[ContentFilter(topics: @[ContentTopic(1), ContentTopic(2)]),
ContentFilter(topics: @[ContentTopic(3), ContentTopic(4)])]
var response = await client.post_waku_v2_filter_v1_subscription(contentFilters = contentFilters, topic = some(defaultTopic))
check:
# Light node has successfully subscribed to a single filter
node.filters.len() == 1
response == true
response = await client.delete_waku_v2_filter_v1_subscription(contentFilters = contentFilters, topic = some(defaultTopic))
check:
# Light node has successfully unsubscribed from all filters
node.filters.len() == 0
response == true
server.stop()
server.close()
waitFor node.stop()
asyncTest "Filter API: get latest messages":
const cTopic = ContentTopic(1)
waitFor node.start()
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installFilterApiHandlers(node, server)
server.start()
node.mountFilter()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
# First ensure subscription exists
let sub = await client.post_waku_v2_filter_v1_subscription(contentFilters = @[ContentFilter(topics: @[cTopic])], topic = some(defaultTopic))
check:
sub
# Now prime the node with some messages before tests
var
msgList = @[WakuMessage(payload: @[byte 0], contentTopic: ContentTopic(2)),
WakuMessage(payload: @[byte 1], contentTopic: cTopic),
WakuMessage(payload: @[byte 2], contentTopic: cTopic),
WakuMessage(payload: @[byte 3], contentTopic: cTopic),
WakuMessage(payload: @[byte 4], contentTopic: cTopic),
WakuMessage(payload: @[byte 5], contentTopic: cTopic),
WakuMessage(payload: @[byte 6], contentTopic: cTopic),
WakuMessage(payload: @[byte 7], contentTopic: cTopic),
WakuMessage(payload: @[byte 8], contentTopic: cTopic),
WakuMessage(payload: @[byte 9], contentTopic: ContentTopic(2))]
let
filters = node.filters
requestId = toSeq(Table(filters).keys)[0]
for wakuMsg in msgList:
filters.notify(wakuMsg, requestId)
var response = await client.get_waku_v2_filter_v1_messages(cTopic)
check:
response.len() == 8
response.allIt(it.contentTopic == cTopic)
# No new messages
response = await client.get_waku_v2_filter_v1_messages(cTopic)
check:
response.len() == 0
# Now ensure that no more than the preset max messages can be cached
let maxSize = filter_api.maxCache
for x in 1..(maxSize + 1):
# Try to cache 1 more than maximum allowed
filters.notify(WakuMessage(payload: @[byte x], contentTopic: cTopic), requestId)
response = await client.get_waku_v2_filter_v1_messages(cTopic)
check:
# Max messages has not been exceeded
response.len == maxSize
response.allIt(it.contentTopic == cTopic)
# Check that oldest item has been removed
response[0].payload == @[byte 2]
response[maxSize - 1].payload == @[byte (maxSize + 1)]
server.stop()
server.close()
waitFor node.stop()
asyncTest "Admin API: get peer information":
const cTopic = ContentTopic(1)
waitFor node.start()
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
installAdminApiHandlers(node, server)
server.start()
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
node.mountFilter()
node.mountSwap()
node.mountStore()
# Create and set some peers
let
locationAddr = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
filterKey = wakunode2.PrivateKey.random(ECDSA, rng[]).get()
filterPeer = PeerInfo.init(filterKey, @[locationAddr])
swapKey = wakunode2.PrivateKey.random(ECDSA, rng[]).get()
swapPeer = PeerInfo.init(swapKey, @[locationAddr])
storeKey = wakunode2.PrivateKey.random(ECDSA, rng[]).get()
storePeer = PeerInfo.init(storeKey, @[locationAddr])
node.wakuFilter.setPeer(filterPeer)
node.wakuSwap.setPeer(swapPeer)
node.wakuStore.setPeer(storePeer)
let response = await client.get_waku_v2_admin_v1_peers()
check:
response.len == 3
# Check filter peer
(response.filterIt(it.protocol == WakuFilterCodec)[0]).multiaddr == constructMultiaddrStr(filterPeer)
# Check swap peer
(response.filterIt(it.protocol == WakuSwapCodec)[0]).multiaddr == constructMultiaddrStr(swapPeer)
# Check store peer
(response.filterIt(it.protocol == WakuStoreCodec)[0]).multiaddr == constructMultiaddrStr(storePeer)
server.close()
waitFor node.stop()

View File

@ -0,0 +1,38 @@
import
std/[unittest, options, tables, sets],
chronos, chronicles,
../../waku/v2/node/message_store,
../../waku/v2/protocol/waku_store/waku_store,
./utils,
../../waku/v2/waku_types,
../../waku/v2/node/sqlite
suite "Message Store":
test "set and get works":
let
database = SqliteDatabase.init("", inMemory = true)[]
store = MessageStore.init(database)[]
topic = ContentTopic(1)
var msgs = @[
WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic),
WakuMessage(payload: @[byte 1, 2, 3, 4], contentTopic: topic),
WakuMessage(payload: @[byte 1, 2, 3, 4, 5], contentTopic: topic),
]
defer: store.close()
for msg in msgs:
discard store.put(computeIndex(msg), msg)
var responseCount = 0
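# store.getAll below invokes this callback once for every stored message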
proc data(timestamp: uint64, msg: WakuMessage) =
responseCount += 1
check msg in msgs
let res = store.getAll(data)
check:
res.isErr == false
responseCount == 3

View File

@ -0,0 +1,51 @@
{.used.}
import
std/[unittest, options, os, strutils],
stew/shims/net as stewNet,
json_rpc/[rpcserver, rpcclient],
libp2p/crypto/crypto,
../../waku/v2/node/wakunode2,
../../waku/v2/node/rpc/wakurpc,
../../waku/v2/protocol/waku_relay,
../../waku/v2/waku_types,
../test_helpers
template sourceDir*: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigPath = sourceDir / ParDir / ParDir / "waku" / "v2" / "node" / "rpc" / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigPath)
suite "Waku v2 Remote Procedure Calls":
# WakuNode setup
let
rng = crypto.newRng()
privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet()
bindIp = ValidIpAddress.init("0.0.0.0")
extIp = ValidIpAddress.init("127.0.0.1")
port = Port(9000)
node = WakuNode.init(privkey, bindIp, port, some(extIp), some(port))
waitFor node.start()
waitFor node.mountRelay(@["waku"])
# RPC server setup
let
rpcPort = Port(8545)
ta = initTAddress(bindIp, rpcPort)
server = newRpcHttpServer([ta])
setupWakuRPC(node, server)
server.start()
asyncTest "waku_info":
# RPC client setup
let client = newRpcHttpClient()
await client.connect("127.0.0.1", rpcPort)
check await(client.waku_version()) == WakuRelayCodec
server.stop()
server.close()
waitFor node.stop()

108
tests/v2/test_waku.nim Normal file
View File

@ -0,0 +1,108 @@
#
# Waku
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
{.used.}
import unittest, options, tables, sets, sequtils
import chronos, chronicles
import utils,
libp2p/errors,
libp2p/switch,
libp2p/protobuf/minprotobuf,
libp2p/stream/[bufferstream, connection],
libp2p/crypto/crypto,
libp2p/protocols/pubsub/floodsub
import ../../waku/v2/protocol/waku_relay
import ../test_helpers
const
StreamTransportTrackerName = "stream.transport"
StreamServerTrackerName = "stream.server"
# TODO: Start with floodsub here, then move other logic here
# XXX: If I cast to WakuRelay here I get a SIGSEGV
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
# Wait until the receiver shows up in the sender's floodsub peer table, to make the test deterministic
# (this is for testing purposes only)
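# ceil acts as a retry budget: up to 15 polls of 100 ms (~1.5 s) before the doAssert below fails the test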
var ceil = 15
let fsub = cast[WakuRelay](sender.pubSub.get())
while not fsub.floodsub.hasKey(key) or
not fsub.floodsub[key].anyIt(it.peerInfo.id == receiver.peerInfo.id):
await sleepAsync(100.millis)
dec ceil
doAssert(ceil > 0, "waitSub timeout!")
proc message(): seq[byte] =
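## Builds a minimal protobuf payload: field 1 carries the string "hello".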
var pb = initProtoBuffer()
pb.write(1, "hello")
pb.finish()
pb.buffer
proc decodeMessage(data: seq[byte]): string =
var pb = initProtoBuffer(data)
result = ""
let res = pb.getField(1, result)
procSuite "FloodSub":
teardown:
let
trackers = [
# getTracker(ConnectionTrackerName),
getTracker(BufferStreamTrackerName),
getTracker(AsyncStreamWriterTrackerName),
getTracker(AsyncStreamReaderTrackerName),
getTracker(StreamTransportTrackerName),
getTracker(StreamServerTrackerName)
]
for tracker in trackers:
if not isNil(tracker):
check tracker.isLeaked() == false
asyncTest "FloodSub basic publish/subscribe A -> B":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
debug "Hit handler", topic
let msg = decodeMessage(data)
check topic == "foobar"
check msg == "hello"
completionFut.complete(true)
let
nodes = generateNodes(2)
nodesFut = await allFinished(
nodes[0].start(),
nodes[1].start()
)
for node in nodes:
await node.mountRelay()
await subscribeNodes(nodes)
await nodes[1].subscribe("foobar", handler)
await waitSub(nodes[0], nodes[1], "foobar")
# TODO: you might want to check the value here
let msg = message()
discard await nodes[0].publish("foobar", msg)
check: await completionFut.wait(5.seconds)
await allFuturesThrowing(
nodes[0].stop(),
nodes[1].stop()
)
for fut in nodesFut:
let res = fut.read()
await allFuturesThrowing(res)

View File

@ -0,0 +1,130 @@
{.used.}
import
std/[unittest, options, tables, sets],
chronos, chronicles,
libp2p/switch,
libp2p/protobuf/minprotobuf,
libp2p/stream/[bufferstream, connection],
libp2p/crypto/crypto,
libp2p/multistream,
../../waku/v2/protocol/[waku_filter, message_notifier],
../../waku/v2/waku_types,
../test_helpers, ./utils
procSuite "Waku Filter":
asyncTest "handle filter":
const defaultTopic = "/waku/2/default-waku/proto"
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
contentTopic = ContentTopic(1)
post = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: contentTopic)
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
var responseRequestIdFuture = newFuture[string]()
proc handle(requestId: string, msg: MessagePush) {.gcsafe, closure.} =
check:
msg.messages.len() == 1
msg.messages[0] == post
responseRequestIdFuture.complete(requestId)
let
proto = WakuFilter.init(dialSwitch, crypto.newRng(), handle)
rpc = FilterRequest(contentFilters: @[ContentFilter(topics: @[contentTopic])], topic: defaultTopic, subscribe: true)
dialSwitch.mount(proto)
proto.setPeer(listenSwitch.peerInfo)
proc emptyHandle(requestId: string, msg: MessagePush) {.gcsafe, closure.} =
discard
let
proto2 = WakuFilter.init(listenSwitch, crypto.newRng(), emptyHandle)
subscription = proto2.subscription()
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto2)
let id = await proto.subscribe(rpc)
await sleepAsync(2.seconds)
await subscriptions.notify(defaultTopic, post)
check:
(await responseRequestIdFuture) == id
asyncTest "Can subscribe and unsubscribe from content filter":
const defaultTopic = "/waku/2/default-waku/proto"
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
contentTopic = ContentTopic(1)
post = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: contentTopic)
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
var responseCompletionFuture = newFuture[bool]()
proc handle(requestId: string, msg: MessagePush) {.gcsafe, closure.} =
check:
msg.messages.len() == 1
msg.messages[0] == post
responseCompletionFuture.complete(true)
let
proto = WakuFilter.init(dialSwitch, crypto.newRng(), handle)
rpc = FilterRequest(contentFilters: @[ContentFilter(topics: @[contentTopic])], topic: defaultTopic, subscribe: true)
dialSwitch.mount(proto)
proto.setPeer(listenSwitch.peerInfo)
proc emptyHandle(requestId: string, msg: MessagePush) {.gcsafe, closure.} =
discard
let
proto2 = WakuFilter.init(listenSwitch, crypto.newRng(), emptyHandle)
subscription = proto2.subscription()
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto2)
let id = await proto.subscribe(rpc)
await sleepAsync(2.seconds)
await subscriptions.notify(defaultTopic, post)
check:
# Check that subscription works as expected
(await responseCompletionFuture.withTimeout(3.seconds)) == true
# Reset to test unsubscribe
responseCompletionFuture = newFuture[bool]()
let
rpcU = FilterRequest(contentFilters: @[ContentFilter(topics: @[contentTopic])], topic: defaultTopic, subscribe: false)
await proto.unsubscribe(rpcU)
await sleepAsync(2.seconds)
await subscriptions.notify(defaultTopic, post)
check:
# Check that unsubscribe works as expected
(await responseCompletionFuture.withTimeout(5.seconds)) == false

View File

@ -0,0 +1,217 @@
{.used.}
import
std/[unittest,algorithm,options],
nimcrypto/sha2,
../../waku/v2/waku_types,
../../waku/v2/protocol/waku_store/waku_store,
../test_helpers
proc createSampleList(s: int): seq[IndexedWakuMessage] =
## Returns a sequence of `s` IndexedWakuMessage entries with increasing payloads and receive times
var data {.noinit.}: array[32, byte]
for x in data.mitems: x = 1
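# All entries share the same digest (every byte set to 1); they differ only by payload and receivedTime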
for i in 0..<s:
result.add(IndexedWakuMessage(msg: WakuMessage(payload: @[byte i]), index: Index(receivedTime: float64(i), digest: MDigest[256](data: data)) ))
procSuite "pagination":
test "Index computation test":
let
wm = WakuMessage(payload: @[byte 1, 2, 3])
index = wm.computeIndex()
check:
# the fields of the index should be non-empty
len(index.digest.data) != 0
len(index.digest.data) == 32 # sha2 output length in bytes
index.receivedTime != 0 # the timestamp should be a non-zero value
let
wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: ContentTopic(1))
index1 = wm1.computeIndex()
wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: ContentTopic(1))
index2 = wm2.computeIndex()
check:
# the digests of two identical WakuMessages must be the same
index1.digest == index2.digest
test "Index comparison, IndexedWakuMessage comparison, and Sorting tests":
var data1 {.noinit.}: array[32, byte]
for x in data1.mitems: x = 1
var data2 {.noinit.}: array[32, byte]
for x in data2.mitems: x = 2
var data3 {.noinit.}: array[32, byte]
for x in data3.mitems: x = 3
let
index1 = Index(receivedTime: 1, digest: MDigest[256](data: data1))
index2 = Index(receivedTime: 1, digest: MDigest[256](data: data2))
index3 = Index(receivedTime: 2, digest: MDigest[256](data: data3))
iwm1 = IndexedWakuMessage(index: index1)
iwm2 = IndexedWakuMessage(index: index2)
iwm3 = IndexedWakuMessage(index: index3)
check:
indexComparison(index1, index1) == 0
indexComparison(index1, index2) == -1
indexComparison(index2, index1) == 1
indexComparison(index1, index3) == -1
indexComparison(index3, index1) == 1
check:
indexedWakuMessageComparison(iwm1, iwm1) == 0
indexedWakuMessageComparison(iwm1, iwm2) == -1
indexedWakuMessageComparison(iwm2, iwm1) == 1
indexedWakuMessageComparison(iwm1, iwm3) == -1
indexedWakuMessageComparison(iwm3, iwm1) == 1
var sortingList = @[iwm3, iwm1, iwm2]
sortingList.sort(indexedWakuMessageComparison)
check:
sortingList[0] == iwm1
sortingList[1] == iwm2
sortingList[2] == iwm3
test "Find Index test":
let msgList = createSampleList(10)
check:
msgList.findIndex(msgList[3].index).get() == 3
msgList.findIndex(Index()).isNone == true
test "Forward pagination test":
var
msgList = createSampleList(10)
pagingInfo = PagingInfo(pageSize: 2, cursor: msgList[3].index, direction: PagingDirection.FORWARD)
# test for a normal pagination
var (data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 2
data == msgList[4..5]
newPagingInfo.cursor == msgList[5].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == pagingInfo.pageSize
# test for an initial pagination request with an empty cursor
pagingInfo = PagingInfo(pageSize: 2, direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 2
data == msgList[0..1]
newPagingInfo.cursor == msgList[1].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 2
# test for an empty msgList
pagingInfo = PagingInfo(pageSize: 2, direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(@[], pagingInfo)
check:
data.len == 0
newPagingInfo.pageSize == 0
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.cursor == pagingInfo.cursor
# test for a page size larger than the remaining messages
pagingInfo = PagingInfo(pageSize: 10, cursor: msgList[3].index, direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 6
data == msgList[4..9]
newPagingInfo.cursor == msgList[9].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 6
# test for a page size larger than the maximum allowed page size
pagingInfo = PagingInfo(pageSize: MaxPageSize+1, cursor: msgList[3].index, direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len <= MaxPageSize
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize <= MaxPageSize
# test for a cursor pointing to the end of the message list
pagingInfo = PagingInfo(pageSize: 10, cursor: msgList[9].index, direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 0
newPagingInfo.cursor == msgList[9].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 0
# test for an invalid cursor
pagingInfo = PagingInfo(pageSize: 10, cursor: computeIndex(WakuMessage(payload: @[byte 10])), direction: PagingDirection.FORWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 0
newPagingInfo.cursor == pagingInfo.cursor
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 0
test "Backward pagination test":
var
msgList = createSampleList(10)
pagingInfo = PagingInfo(pageSize: 2, cursor: msgList[3].index, direction: PagingDirection.BACKWARD)
# test for a normal pagination
var (data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data == msgList[1..2]
newPagingInfo.cursor == msgList[1].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == pagingInfo.pageSize
# test for an empty msgList
pagingInfo = PagingInfo(pageSize: 2, direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(@[], pagingInfo)
check:
data.len == 0
newPagingInfo.pageSize == 0
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.cursor == pagingInfo.cursor
# test for an initial pagination request with an empty cursor
pagingInfo = PagingInfo(pageSize: 2, direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 2
data == msgList[8..9]
newPagingInfo.cursor == msgList[8].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 2
# test for a page size larger than the remaining messages
pagingInfo = PagingInfo(pageSize: 5, cursor: msgList[3].index, direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data == msgList[0..2]
newPagingInfo.cursor == msgList[0].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 3
# test for a page size larger than the maximum allowed page size
pagingInfo = PagingInfo(pageSize: MaxPageSize+1, cursor: msgList[3].index, direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len <= MaxPageSize
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize <= MaxPageSize
# test for a cursor pointing to the beginning of the message list
pagingInfo = PagingInfo(pageSize: 5, cursor: msgList[0].index, direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 0
newPagingInfo.cursor == msgList[0].index
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 0
# test for an invalid cursor
pagingInfo = PagingInfo(pageSize: 5, cursor: computeIndex(WakuMessage(payload: @[byte 10])), direction: PagingDirection.BACKWARD)
(data, newPagingInfo) = paginateWithIndex(msgList, pagingInfo)
check:
data.len == 0
newPagingInfo.cursor == pagingInfo.cursor
newPagingInfo.direction == pagingInfo.direction
newPagingInfo.pageSize == 0

View File

@ -0,0 +1,111 @@
{.used.}
import
std/unittest,
../../waku/v2/node/waku_payload,
../test_helpers
procSuite "Waku Payload":
let rng = newRng()
test "Encode/Decode without encryption (version 0)":
## This would be the usual way when no encryption is done or when it is done
## on the application layer.
# Encoding
let
version = 0'u32
payload = @[byte 0, 1, 2]
msg = WakuMessage(payload: payload, version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.init(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind: None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload
test "Encode/Decode without encryption (version 0) with encodePayload":
## This is a bit silly and only there for completeness
# Encoding
let
version = 0'u32
payload = Payload(payload: @[byte 0, 1, 2])
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isOk()
let
msg = WakuMessage(payload: encodedPayload.get(), version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.init(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind: None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload.payload
test "Encode/Decode with encryption (version 1)":
# Encoding
let
privKey = PrivateKey.random(rng[])
version = 1'u32
payload = Payload(payload: @[byte 0, 1, 2],
dst: some(privKey.toPublicKey()))
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isOk()
let
msg = WakuMessage(payload: encodedPayload.get(), version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.init(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind: Asymmetric, privKey: privKey)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isOk()
decoded.get().payload == payload.payload
test "Encode with unsupported version":
let
version = 2'u32
payload = Payload(payload: @[byte 0, 1, 2])
encodedPayload = payload.encode(version, rng[])
check encodedPayload.isErr()
test "Decode with unsupported version":
# Encoding
let
version = 2'u32
payload = @[byte 0, 1, 2]
msg = WakuMessage(payload: payload, version: version)
pb = msg.encode()
# Decoding
let msgDecoded = WakuMessage.init(pb.buffer)
check msgDecoded.isOk()
let
keyInfo = KeyInfo(kind: None)
decoded = decodePayload(msgDecoded.get(), keyInfo)
check:
decoded.isErr()

View File

@ -0,0 +1,379 @@
{.used.}
import
std/[unittest, options, tables, sets],
chronos, chronicles,
libp2p/switch,
libp2p/protobuf/minprotobuf,
libp2p/stream/[bufferstream, connection],
libp2p/crypto/crypto,
libp2p/protocols/pubsub/rpc/message,
../../waku/v2/protocol/message_notifier,
../../waku/v2/protocol/waku_store/waku_store,
../../waku/v2/node/[message_store, sqlite],
../test_helpers, ./utils,
../../waku/v2/waku_types
procSuite "Waku Store":
asyncTest "handle query":
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
topic = ContentTopic(1)
msg = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
msg2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: ContentTopic(2))
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
let
proto = WakuStore.init(dialSwitch, crypto.newRng())
subscription = proto.subscription()
rpc = HistoryQuery(topics: @[topic])
proto.setPeer(listenSwitch.peerInfo)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto)
await subscriptions.notify("foo", msg)
await subscriptions.notify("foo", msg2)
var completionFut = newFuture[bool]()
proc handler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 1
response.messages[0] == msg
completionFut.complete(true)
await proto.query(rpc, handler)
check:
(await completionFut.withTimeout(5.seconds)) == true
asyncTest "handle query with store and restarts":
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
topic = ContentTopic(1)
database = SqliteDatabase.init("", inMemory = true)[]
store = MessageStore.init(database)[]
msg = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
msg2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: ContentTopic(2))
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
let
proto = WakuStore.init(dialSwitch, crypto.newRng(), store)
subscription = proto.subscription()
rpc = HistoryQuery(topics: @[topic])
proto.setPeer(listenSwitch.peerInfo)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto)
await subscriptions.notify("foo", msg)
await subscriptions.notify("foo", msg2)
var completionFut = newFuture[bool]()
proc handler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 1
response.messages[0] == msg
completionFut.complete(true)
await proto.query(rpc, handler)
check:
(await completionFut.withTimeout(5.seconds)) == true
let
proto2 = WakuStore.init(dialSwitch, crypto.newRng(), store)
key2 = PrivateKey.random(ECDSA, rng[]).get()
var listenSwitch2 = newStandardSwitch(some(key2))
discard await listenSwitch2.start()
proto2.setPeer(listenSwitch2.peerInfo)
listenSwitch2.mount(proto2)
var completionFut2 = newFuture[bool]()
proc handler2(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 1
response.messages[0] == msg
completionFut2.complete(true)
await proto2.query(rpc, handler2)
check:
(await completionFut2.withTimeout(5.seconds)) == true
asyncTest "handle query with forward pagination":
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
var
msgList = @[WakuMessage(payload: @[byte 0], contentTopic: ContentTopic(2)),
WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 2], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 3], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 4], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 5], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 6], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 7], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 8], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 9], contentTopic: ContentTopic(2))]
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
let
proto = WakuStore.init(dialSwitch, crypto.newRng())
subscription = proto.subscription()
rpc = HistoryQuery(topics: @[ContentTopic(1)], pagingInfo: PagingInfo(pageSize: 2, direction: PagingDirection.FORWARD) )
proto.setPeer(listenSwitch.peerInfo)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto)
for wakuMsg in msgList:
await subscriptions.notify("foo", wakuMsg)
var completionFut = newFuture[bool]()
proc handler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 2
response.pagingInfo.pageSize == 2
response.pagingInfo.direction == PagingDirection.FORWARD
response.pagingInfo.cursor != Index()
completionFut.complete(true)
await proto.query(rpc, handler)
check:
(await completionFut.withTimeout(5.seconds)) == true
asyncTest "handle query with backward pagination":
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
var
msgList = @[WakuMessage(payload: @[byte 0], contentTopic: ContentTopic(2)),
WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 2], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 3], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 4], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 5], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 6], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 7], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 8], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 9], contentTopic: ContentTopic(2))]
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
let
proto = WakuStore.init(dialSwitch, crypto.newRng())
subscription = proto.subscription()
proto.setPeer(listenSwitch.peerInfo)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto)
for wakuMsg in msgList:
await subscriptions.notify("foo", wakuMsg)
var completionFut = newFuture[bool]()
proc handler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 2
response.pagingInfo.pageSize == 2
response.pagingInfo.direction == PagingDirection.BACKWARD
response.pagingInfo.cursor != Index()
completionFut.complete(true)
let rpc = HistoryQuery(topics: @[ContentTopic(1)], pagingInfo: PagingInfo(pageSize: 2, direction: PagingDirection.BACKWARD) )
await proto.query(rpc, handler)
check:
(await completionFut.withTimeout(5.seconds)) == true
asyncTest "handle queries with no pagination":
let
key = PrivateKey.random(ECDSA, rng[]).get()
peer = PeerInfo.init(key)
var
msgList = @[WakuMessage(payload: @[byte 0], contentTopic: ContentTopic(2)),
WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 2], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 3], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 4], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 5], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 6], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 7], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 8], contentTopic: ContentTopic(1)),
WakuMessage(payload: @[byte 9], contentTopic: ContentTopic(2))]
var dialSwitch = newStandardSwitch()
discard await dialSwitch.start()
var listenSwitch = newStandardSwitch(some(key))
discard await listenSwitch.start()
let
proto = WakuStore.init(dialSwitch, crypto.newRng())
subscription = proto.subscription()
proto.setPeer(listenSwitch.peerInfo)
var subscriptions = newTable[string, MessageNotificationSubscription]()
subscriptions["test"] = subscription
listenSwitch.mount(proto)
for wakuMsg in msgList:
await subscriptions.notify("foo", wakuMsg)
var completionFut = newFuture[bool]()
proc handler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages.len() == 8
response.pagingInfo == PagingInfo()
completionFut.complete(true)
let rpc = HistoryQuery(topics: @[ContentTopic(1)] )
await proto.query(rpc, handler)
check:
(await completionFut.withTimeout(5.seconds)) == true
test "Index Protobuf encoder/decoder test":
let
index = computeIndex(WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)))
pb = index.encode()
decodedIndex = Index.init(pb.buffer)
check:
# the fields of decodedIndex must be the same as the original index
decodedIndex.isErr == false
decodedIndex.value == index
let
emptyIndex = Index()
epb = emptyIndex.encode()
decodedEmptyIndex = Index.init(epb.buffer)
check:
# check the correctness of init and encode for an empty Index
decodedEmptyIndex.isErr == false
decodedEmptyIndex.value == emptyIndex
test "PagingDirection Protobuf encod/init test":
let
pagingDirection = PagingDirection.BACKWARD
pb = pagingDirection.encode()
decodedPagingDirection = PagingDirection.init(pb.buffer)
check:
# the decodedPagingDirection must be the same as the original pagingDirection
decodedPagingDirection.isErr == false
decodedPagingDirection.value == pagingDirection
test "PagingInfo Protobuf encod/init test":
let
index = computeIndex(WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)))
pagingInfo = PagingInfo(pageSize: 1, cursor: index, direction: PagingDirection.BACKWARD)
pb = pagingInfo.encode()
decodedPagingInfo = PagingInfo.init(pb.buffer)
check:
# the fields of decodedPagingInfo must be the same as the original pagingInfo
decodedPagingInfo.isErr == false
decodedPagingInfo.value == pagingInfo
let
emptyPagingInfo = PagingInfo()
epb = emptyPagingInfo.encode()
decodedEmptyPagingInfo = PagingInfo.init(epb.buffer)
check:
# check the correctness of init and encode for an empty PagingInfo
decodedEmptyPagingInfo.isErr == false
decodedEmptyPagingInfo.value == emptyPagingInfo
test "HistoryQuery Protobuf encod/init test":
let
index = computeIndex(WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1)))
pagingInfo = PagingInfo(pageSize: 1, cursor: index, direction: PagingDirection.BACKWARD)
query = HistoryQuery(topics: @[ContentTopic(1)], pagingInfo: pagingInfo)
pb = query.encode()
decodedQuery = HistoryQuery.init(pb.buffer)
check:
# the fields of decodedQuery must be the same as those of the original query
decodedQuery.isErr == false
decodedQuery.value == query
let
emptyQuery = HistoryQuery()
epb = emptyQuery.encode()
decodedEmptyQuery = HistoryQuery.init(epb.buffer)
check:
# check the correctness of init and encode for an empty HistoryQuery
decodedEmptyQuery.isErr == false
decodedEmptyQuery.value == emptyQuery
test "HistoryResponse Protobuf encod/init test":
let
wm = WakuMessage(payload: @[byte 1], contentTopic: ContentTopic(1))
index = computeIndex(wm)
pagingInfo = PagingInfo(pageSize: 1, cursor: index, direction: PagingDirection.BACKWARD)
res = HistoryResponse(messages: @[wm], pagingInfo: pagingInfo)
pb = res.encode()
decodedRes = HistoryResponse.init(pb.buffer)
check:
# the fields of decodedRes must be the same as those of the original response
decodedRes.isErr == false
decodedRes.value == res
let
emptyRes = HistoryResponse()
epb = emptyRes.encode()
decodedEmptyRes = HistoryResponse.init(epb.buffer)
check:
# check the correctness of init and encode for an empty HistoryResponse
decodedEmptyRes.isErr == false
decodedEmptyRes.value == emptyRes

135
tests/v2/test_waku_swap.nim Normal file
View File

@ -0,0 +1,135 @@
import
std/[unittest, options, tables, sets],
chronos, chronicles, stew/shims/net as stewNet, stew/byteutils,
libp2p/switch,
libp2p/protobuf/minprotobuf,
libp2p/stream/[bufferstream, connection],
libp2p/crypto/[crypto, secp],
libp2p/switch,
eth/keys,
../../waku/v2/protocol/[message_notifier],
../../waku/v2/protocol/waku_store/waku_store,
../../waku/v2/protocol/waku_swap/waku_swap,
../../waku/v2/node/wakunode2,
../test_helpers, ./utils,
../../waku/v2/waku_types
procSuite "Waku SWAP Accounting":
test "Handshake Encode/Decode":
let
beneficiary = @[byte 0, 1, 2]
handshake = Handshake(beneficiary: beneficiary)
pb = handshake.encode()
let decodedHandshake = Handshake.init(pb.buffer)
check:
decodedHandshake.isErr == false
decodedHandshake.get().beneficiary == beneficiary
test "Cheque Encode/Decode":
let
amount = 1'u32
date = 9000'u32
beneficiary = @[byte 0, 1, 2]
cheque = Cheque(beneficiary: beneficiary, amount: amount, date: date)
pb = cheque.encode()
let decodedCheque = Cheque.init(pb.buffer)
check:
decodedCheque.isErr == false
decodedCheque.get() == cheque
asyncTest "Update accounting state after store operations":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60001))
contentTopic = ContentTopic(1)
message = WakuMessage(payload: "hello world".toBytes(), contentTopic: contentTopic)
var completionFut = newFuture[bool]()
# Start nodes and mount protocols
await node1.start()
node1.mountSwap()
node1.mountStore()
await node2.start()
node2.mountSwap()
node2.mountStore()
await node2.subscriptions.notify("/waku/2/default-waku/proto", message)
await sleepAsync(2000.millis)
node1.wakuStore.setPeer(node2.peerInfo)
node1.wakuSwap.setPeer(node2.peerInfo)
node2.wakuSwap.setPeer(node1.peerInfo)
proc storeHandler(response: HistoryResponse) {.gcsafe, closure.} =
debug "storeHandler hit"
check:
response.messages[0] == message
completionFut.complete(true)
await node1.query(HistoryQuery(topics: @[contentTopic]), storeHandler)
check:
(await completionFut.withTimeout(5.seconds)) == true
# Accounting table updated with credit and debit, respectively
node1.wakuSwap.accounting[node2.peerInfo.peerId] == 1
node2.wakuSwap.accounting[node1.peerInfo.peerId] == -1
await node1.stop()
await node2.stop()
# TODO Add cheque here
asyncTest "Update accounting state after sending cheque":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60001))
contentTopic = ContentTopic(1)
message = WakuMessage(payload: "hello world".toBytes(), contentTopic: contentTopic)
var futures = [newFuture[bool](), newFuture[bool]()]
# Start nodes and mount protocols
await node1.start()
node1.mountSwap()
node1.mountStore()
await node2.start()
node2.mountSwap()
node2.mountStore()
await node2.subscriptions.notify("/waku/2/default-waku/proto", message)
await sleepAsync(2000.millis)
node1.wakuStore.setPeer(node2.peerInfo)
node1.wakuSwap.setPeer(node2.peerInfo)
node2.wakuSwap.setPeer(node1.peerInfo)
proc handler1(response: HistoryResponse) {.gcsafe, closure.} =
futures[0].complete(true)
proc handler2(response: HistoryResponse) {.gcsafe, closure.} =
futures[1].complete(true)
# TODO Handshakes - for now we assume implicit, e2e still works for PoC
await node1.query(HistoryQuery(topics: @[contentTopic]), handler1)
await node1.query(HistoryQuery(topics: @[contentTopic]), handler2)
check:
(await allFutures(futures).withTimeout(5.seconds)) == true
# Accounting table updated with credit and debit, respectively
# After sending a cheque the balance is partially adjusted
node1.wakuSwap.accounting[node2.peerInfo.peerId] == 1
node2.wakuSwap.accounting[node1.peerInfo.peerId] == -1
await node1.stop()
await node2.stop()

253
tests/v2/test_wakunode.nim Normal file
View File

@ -0,0 +1,253 @@
{.used.}
import
std/unittest,
chronicles, chronos, stew/shims/net as stewNet, stew/byteutils,
libp2p/crypto/crypto,
libp2p/crypto/secp,
libp2p/switch,
eth/keys,
../../waku/v2/protocol/[waku_relay, waku_filter, message_notifier],
../../waku/v2/protocol/waku_store/waku_store,
../../waku/v2/node/wakunode2,
../test_helpers,
../../waku/v2/waku_types
procSuite "WakuNode":
let rng = keys.newRng()
asyncTest "Message published with content filter is retrievable":
let
nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
node = WakuNode.init(nodeKey, ValidIpAddress.init("0.0.0.0"),
Port(60000))
pubSubTopic = "chat"
contentTopic = ContentTopic(1)
filterRequest = FilterRequest(topic: pubSubTopic, contentFilters: @[ContentFilter(topics: @[contentTopic])], subscribe: true)
message = WakuMessage(payload: "hello world".toBytes(),
contentTopic: contentTopic)
# This could/should become a more fixed handler (at least default) that
# would be enforced on WakuNode level.
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
check:
topic == "chat"
node.filters.notify(msg.value(), topic)
var completionFut = newFuture[bool]()
# This would be the actual application handler
proc contentHandler(msg: WakuMessage) {.gcsafe, closure.} =
let message = string.fromBytes(msg.payload)
check:
message == "hello world"
completionFut.complete(true)
await node.start()
await node.mountRelay()
# Subscribe our node to the pubSubTopic onto which all chat data are published.
await node.subscribe(pubSubTopic, relayHandler)
# Subscribe a contentFilter to trigger a specific application handler when
# WakuMessages with that content are received
await node.subscribe(filterRequest, contentHandler)
await sleepAsync(2000.millis)
await node.publish(pubSubTopic, message)
check:
(await completionFut.withTimeout(5.seconds)) == true
await node.stop()
asyncTest "Content filtered publishing over network":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60002))
pubSubTopic = "chat"
contentTopic = ContentTopic(1)
filterRequest = FilterRequest(topic: pubSubTopic, contentFilters: @[ContentFilter(topics: @[contentTopic])], subscribe: true)
message = WakuMessage(payload: "hello world".toBytes(),
contentTopic: contentTopic)
var completionFut = newFuture[bool]()
# This could/should become a more fixed handler (at least default) that
# would be enforced on WakuNode level.
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
check:
topic == "chat"
node1.filters.notify(msg.value(), topic)
# This would be the actual application handler
proc contentHandler(msg: WakuMessage) {.gcsafe, closure.} =
let message = string.fromBytes(msg.payload)
check:
message == "hello world"
completionFut.complete(true)
await allFutures([node1.start(), node2.start()])
await node1.mountRelay()
await node2.mountRelay()
node1.mountFilter()
node2.mountFilter()
# Subscribe our node to the pubSubTopic onto which all chat data are published.
await node1.subscribe(pubSubTopic, relayHandler)
# Subscribe a contentFilter to trigger a specific application handler when
# WakuMessages with that content are received
node1.wakuFilter.setPeer(node2.peerInfo)
await node1.subscribe(filterRequest, contentHandler)
await sleepAsync(2000.millis)
# Connect peers by dialing from node2 to node1
let conn = await node2.switch.dial(node1.peerInfo.peerId, node1.peerInfo.addrs, WakuRelayCodec)
# We need to sleep to allow the subscription to go through
info "Going to sleep to allow subscribe to go through"
await sleepAsync(2000.millis)
info "Waking up and publishing"
await node2.publish(pubSubTopic, message)
check:
(await completionFut.withTimeout(5.seconds)) == true
await node1.stop()
await node2.stop()
asyncTest "Store protocol returns expected message":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60002))
contentTopic = ContentTopic(1)
message = WakuMessage(payload: "hello world".toBytes(), contentTopic: contentTopic)
var completionFut = newFuture[bool]()
await node1.start()
node1.mountStore()
await node2.start()
node2.mountStore()
await node2.subscriptions.notify("/waku/2/default-waku/proto", message)
await sleepAsync(2000.millis)
node1.wakuStore.setPeer(node2.peerInfo)
proc storeHandler(response: HistoryResponse) {.gcsafe, closure.} =
check:
response.messages[0] == message
completionFut.complete(true)
await node1.query(HistoryQuery(topics: @[contentTopic]), storeHandler)
check:
(await completionFut.withTimeout(5.seconds)) == true
await node1.stop()
await node2.stop()
asyncTest "Filter protocol returns expected message":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60002))
contentTopic = ContentTopic(1)
message = WakuMessage(payload: "hello world".toBytes(), contentTopic: contentTopic)
var completionFut = newFuture[bool]()
await node1.start()
node1.mountFilter()
await node2.start()
node2.mountFilter()
node1.wakuFilter.setPeer(node2.peerInfo)
proc handler(msg: WakuMessage) {.gcsafe, closure.} =
check:
msg == message
completionFut.complete(true)
await node1.subscribe(FilterRequest(topic: "/waku/2/default-waku/proto", contentFilters: @[ContentFilter(topics: @[contentTopic])], subscribe: true), handler)
await sleepAsync(2000.millis)
await node2.subscriptions.notify("/waku/2/default-waku/proto", message)
await sleepAsync(2000.millis)
check:
(await completionFut.withTimeout(5.seconds)) == true
await node1.stop()
await node2.stop()
asyncTest "Messages are correctly relayed":
let
nodeKey1 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node1 = WakuNode.init(nodeKey1, ValidIpAddress.init("0.0.0.0"),
Port(60000))
nodeKey2 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node2 = WakuNode.init(nodeKey2, ValidIpAddress.init("0.0.0.0"),
Port(60002))
nodeKey3 = crypto.PrivateKey.random(Secp256k1, rng[])[]
node3 = WakuNode.init(nodeKey3, ValidIpAddress.init("0.0.0.0"),
Port(60003))
pubSubTopic = "test"
contentTopic = ContentTopic(1)
payload = "hello world".toBytes()
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
await node1.mountRelay(@[pubSubTopic])
await node2.start()
await node2.mountRelay(@[pubSubTopic])
await node3.start()
await node3.mountRelay(@[pubSubTopic])
await node1.connectToNodes(@[node2.peerInfo])
await node3.connectToNodes(@[node2.peerInfo])
var completionFut = newFuture[bool]()
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
let val = msg.value()
check:
topic == pubSubTopic
val.contentTopic == contentTopic
val.payload == payload
completionFut.complete(true)
await node3.subscribe(pubSubTopic, relayHandler)
await sleepAsync(2000.millis)
await node1.publish(pubSubTopic, message)
await sleepAsync(2000.millis)
check:
(await completionFut.withTimeout(5.seconds)) == true
await node1.stop()
await node2.stop()
await node3.stop()

5
tests/v2/test_web3.nim Normal file
View File

@ -0,0 +1,5 @@
import web3
proc web3Test() =
var web3: Web3 # an identifier from web3 package
web3Test()

72
tests/v2/utils.nim Normal file
View File

@ -0,0 +1,72 @@
# Compile-time options controlling pubsub message signing and verification
const
libp2p_pubsub_sign {.booldefine.} = true
libp2p_pubsub_verify {.booldefine.} = true
import random
import chronos
import libp2p/[standard_setup,
protocols/pubsub/pubsub,
protocols/secure/secure]
import ../../waku/v2/waku_types
export standard_setup
randomize()
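# Seed the default RNG used by sample() in subscribeRandom below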
proc generateNodes*(
num: Natural,
secureManagers: openarray[SecureProtocol] = [
# array because order matters
SecureProtocol.Secio,
SecureProtocol.Noise,
],
msgIdProvider: MsgIdProvider = nil,
gossip: bool = false,
triggerSelf: bool = false,
verifySignature: bool = libp2p_pubsub_verify,
sign: bool = libp2p_pubsub_sign): seq[PubSub] =
for i in 0..<num:
let switch = newStandardSwitch(secureManagers = secureManagers)
let wakuRelay = WakuRelay.init(
switch = switch,
triggerSelf = triggerSelf,
verifySignature = verifySignature,
sign = sign,
# XXX: unclear why including this causes a compiler error; it is part of the WakuRelay type
msgIdProvider = msgIdProvider).PubSub
switch.mount(wakuRelay)
result.add(wakuRelay)
proc subscribeNodes*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
for node in nodes:
if dialer.switch.peerInfo.peerId != node.switch.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialer.subscribePeer(node.peerInfo.peerId)
proc subscribeSparseNodes*(nodes: seq[PubSub], degree: int = 2) {.async.} =
if nodes.len < degree:
raise (ref CatchableError)(msg: "node count needs to be greater than or equal to degree!")
for i, dialer in nodes:
if (i mod degree) != 0:
continue
for node in nodes:
if dialer.switch.peerInfo.peerId != node.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialer.subscribePeer(node.peerInfo.peerId)
proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
for dialer in nodes:
var dialed: seq[PeerID]
while dialed.len < nodes.len - 1:
let node = sample(nodes)
if node.peerInfo.peerId notin dialed:
if dialer.peerInfo.peerId != node.peerInfo.peerId:
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialer.subscribePeer(node.peerInfo.peerId)
dialed.add(node.peerInfo.peerId)

View File

@ -0,0 +1 @@
proc tenTimes*(x: int): int = 10*x

View File

@ -0,0 +1,10 @@
discard """
output: "Successful"
"""
# Test for the compiler to be able to compile a Nim file with spaces in the directory name.
# Also test if import of a directory with a space works.
import "more spaces" / mspace
assert tenTimes(5) == 50
echo("Successful")

View File

@ -0,0 +1,11 @@
discard """
action: run
"""
# Tests that module names can contain multi byte characters
let a = 1
doAssert åäö.a == 1
proc inlined() {.inline.} = discard
inlined()

88
waku.nimble Normal file
View File

@ -0,0 +1,88 @@
mode = ScriptMode.Verbose
### Package
version = "0.1.0"
author = "Status Research & Development GmbH"
description = "Waku, Private P2P Messaging for Resource-Restricted Devices"
license = "MIT or Apache License 2.0"
srcDir = "src"
#bin = @["build/waku"]
### Dependencies
requires "nim >= 1.2.0",
"chronicles",
"confutils",
"chronos",
"eth",
"json_rpc",
"libbacktrace",
"nimcrypto",
"stew",
"stint",
"metrics",
"libp2p", # Only for Waku v2
"web3"
### Helper functions
proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
for i in 2..<paramCount():
extra_params &= " " & paramStr(i)
exec "nim " & lang & " --out:build/" & name & " " & extra_params & " " & srcDir & name & ".nim"
proc test(name: string, lang = "c") =
# XXX: When running `> NIM_PARAMS="-d:chronicles_log_level=INFO" make test2`
# I expect compiler flag to be overridden, however it stays with whatever is
# specified here.
buildBinary name, "tests/", "-d:chronicles_log_level=DEBUG"
#buildBinary name, "tests/", "-d:chronicles_log_level=ERROR"
exec "build/" & name
### Waku v1 tasks
task wakunode1, "Build Waku v1 cli node":
buildBinary "wakunode1", "waku/v1/node/", "-d:chronicles_log_level=TRACE"
task sim1, "Build Waku v1 simulation tools":
buildBinary "quicksim", "waku/v1/node/", "-d:chronicles_log_level=INFO"
buildBinary "start_network", "waku/v1/node/", "-d:chronicles_log_level=DEBUG"
task example1, "Build Waku v1 example":
buildBinary "example", "examples/v1/", "-d:chronicles_log_level=DEBUG"
task test1, "Build & run Waku v1 tests":
test "all_tests_v1"
### Waku v2 tasks
task wakunode2, "Build Waku v2 (experimental) cli node":
buildBinary "wakunode2", "waku/v2/node/", "-d:chronicles_log_level=TRACE"
task sim2, "Build Waku v2 simulation tools":
buildBinary "quicksim2", "waku/v2/node/", "-d:chronicles_log_level=DEBUG"
buildBinary "start_network2", "waku/v2/node/", "-d:chronicles_log_level=TRACE"
task example2, "Build Waku v2 example":
let name = "basic2"
buildBinary name, "examples/v2/", "-d:chronicles_log_level=DEBUG"
task test2, "Build & run Waku v2 tests":
test "all_tests_v2"
task scripts2, "Build Waku v2 scripts":
buildBinary "rpc_publish", "waku/v2/node/rpc/", "-d:chronicles_log_level=DEBUG"
buildBinary "rpc_subscribe", "waku/v2/node/rpc/", "-d:chronicles_log_level=DEBUG"
buildBinary "rpc_subscribe_filter", "waku/v2/node/rpc/", "-d:chronicles_log_level=DEBUG"
buildBinary "rpc_query", "waku/v2/node/rpc/", "-d:chronicles_log_level=DEBUG"
buildBinary "rpc_info", "waku/v2/node/rpc/", "-d:chronicles_log_level=DEBUG"
task chat2, "Build example Waku v2 chat usage":
let name = "chat2"
# NOTE: For debugging, set the log level to DEBUG. For chat usage we want minimal log
# output to STDOUT; this can be addressed by redirecting logs to a file instead.
#buildBinary name, "examples/v2/", "-d:chronicles_log_level=WARN"
buildBinary name, "examples/v2/", "-d:chronicles_log_level=DEBUG"
task bridge, "Build Waku v1 - v2 bridge":
buildBinary "wakubridge", "waku/common/", "-d:chronicles_log_level=DEBUG"

7
waku/common/README.md Normal file
View File

@ -0,0 +1,7 @@
# Common
This folder contains (a) modules that use both Waku v1 and Waku v2, and (b) utilities that are useful for both Waku v1 and v2.
Examples include:
- Bridge between v1 and v2
- NAT traversal

View File

@ -0,0 +1,170 @@
import
confutils, confutils/defs, confutils/std/net, chronicles, chronos,
libp2p/crypto/[crypto, secp],
eth/keys
type
FleetV1* = enum
none
prod
staging
test
WakuNodeConf* = object
logLevel* {.
desc: "Sets the log level"
defaultValue: LogLevel.INFO
name: "log-level" .}: LogLevel
listenAddress* {.
defaultValue: defaultListenAddress(config)
desc: "Listening address for the LibP2P traffic"
name: "listen-address"}: ValidIpAddress
libp2pTcpPort* {.
desc: "Libp2p TCP listening port (for Waku v2)"
defaultValue: 9000
name: "libp2p-tcp-port" .}: uint16
devp2pTcpPort* {.
desc: "Devp2p TCP listening port (for Waku v1)"
defaultValue: 30303
name: "devp2p-tcp-port" .}: uint16
udpPort* {.
desc: "UDP listening port"
defaultValue: 9000
name: "udp-port" .}: uint16
portsShift* {.
desc: "Add a shift to all default port numbers"
defaultValue: 0
name: "ports-shift" .}: uint16
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>"
defaultValue: "any" .}: string
rpc* {.
desc: "Enable Waku RPC server"
defaultValue: false
name: "rpc" .}: bool
rpcAddress* {.
desc: "Listening address of the RPC server",
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "rpc-address" }: ValidIpAddress
rpcPort* {.
desc: "Listening port of the RPC server"
defaultValue: 8545
name: "rpc-port" .}: uint16
metricsServer* {.
desc: "Enable the metrics server"
defaultValue: false
name: "metrics-server" .}: bool
metricsServerAddress* {.
desc: "Listening address of the metrics server"
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "metrics-server-address" }: ValidIpAddress
metricsServerPort* {.
desc: "Listening HTTP port of the metrics server"
defaultValue: 8008
name: "metrics-server-port" .}: uint16
### Waku v1 options
fleetv1* {.
desc: "Select the Waku v1 fleet to connect to"
defaultValue: FleetV1.none
name: "fleetv1" .}: FleetV1
staticnodesv1* {.
desc: "Enode URL to directly connect with. Argument may be repeated"
name: "staticnodev1" .}: seq[string]
nodekeyv1* {.
desc: "DevP2P node private key as hex",
# TODO: can the rng be passed in somehow via Load?
defaultValue: keys.KeyPair.random(keys.newRng()[])
name: "nodekeyv1" .}: keys.KeyPair
wakuPow* {.
desc: "PoW requirement of Waku node.",
defaultValue: 0.002
name: "waku-pow" .}: float64
### Waku v2 options
staticnodesv2* {.
desc: "Multiaddr of peer to directly connect with. Argument may be repeated"
name: "staticnodev2" }: seq[string]
nodekeyv2* {.
desc: "P2P node private key as hex"
defaultValue: crypto.PrivateKey.random(Secp256k1, keys.newRng()[]).tryGet()
name: "nodekeyv2" }: crypto.PrivateKey
topics* {.
desc: "Default topics to subscribe to (space separated list)"
defaultValue: "/waku/2/default-waku/proto"
name: "topics" .}: string
store* {.
desc: "Flag whether to start store protocol",
defaultValue: false
name: "store" }: bool
filter* {.
desc: "Flag whether to start filter protocol",
defaultValue: false
name: "filter" }: bool
relay* {.
desc: "Flag whether to start relay protocol",
defaultValue: true
name: "relay" }: bool
storenode* {.
desc: "Multiaddr of peer to connect with for waku store protocol"
defaultValue: ""
name: "storenode" }: string
filternode* {.
desc: "Multiaddr of peer to connect with for waku filter protocol"
defaultValue: ""
name: "filternode" }: string
proc parseCmdArg*(T: type keys.KeyPair, p: TaintedString): T =
try:
let privkey = keys.PrivateKey.fromHex(string(p)).tryGet()
result = privkey.toKeyPair()
except CatchableError:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type keys.KeyPair, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type crypto.PrivateKey, p: TaintedString): T =
let key = SkPrivateKey.init(p)
if key.isOk():
crypto.PrivateKey(scheme: Secp256k1, skkey: key.get())
else:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type crypto.PrivateKey, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type ValidIpAddress, p: TaintedString): T =
try:
result = ValidIpAddress.init(p)
except CatchableError:
raise newException(ConfigurationError, "Invalid IP address")
proc completeCmdArg*(T: type ValidIpAddress, val: TaintedString): seq[string] =
return @[]
func defaultListenAddress*(conf: WakuNodeConf): ValidIpAddress =
(static ValidIpAddress.init("0.0.0.0"))

46
waku/common/utils/nat.nim Normal file
View File

@ -0,0 +1,46 @@
import
std/[strutils, options],
chronicles, stew/shims/net as stewNet,
eth/net/nat
proc setupNat*(natConf, clientId: string, tcpPort, udpPort: Port):
tuple[ip: Option[ValidIpAddress], tcpPort: Option[Port],
udpPort: Option[Port]] {.gcsafe.} =
var nat: NatStrategy
case natConf.toLowerAscii:
of "any":
nat = NatAny
of "none":
nat = NatNone
of "upnp":
nat = NatUpnp
of "pmp":
nat = NatPmp
else:
if natConf.startsWith("extip:"):
try:
# any required port redirection is assumed to be done by hand
result.ip = some(ValidIpAddress.init(natConf[6..^1]))
nat = NatNone
except ValueError:
error "nor a valid IP address", address = natConf[6..^1]
quit QuitFailure
else:
error "not a valid NAT mechanism", value = natConf
quit QuitFailure
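# With a NAT strategy resolved, ask the gateway for our external IP and redirect the TCP/UDP ports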
if nat != NatNone:
let extIp = getExternalIP(nat)
if extIP.isSome:
result.ip = some(ValidIpAddress.init extIp.get)
# TODO: redirectPorts is considered a gcsafety violation
# because it obtains the address of a non-gcsafe proc?
let extPorts = ({.gcsafe.}:
redirectPorts(tcpPort = tcpPort,
udpPort = udpPort,
description = clientId))
if extPorts.isSome:
let (extTcpPort, extUdpPort) = extPorts.get()
result.tcpPort = some(extTcpPort)
result.udpPort = some(extUdpPort)

135
waku/common/wakubridge.nim Normal file
View File

@ -0,0 +1,135 @@
import
std/strutils,
chronos, confutils, chronicles, chronicles/topics_registry, metrics,
stew/shims/net as stewNet, json_rpc/rpcserver,
# Waku v1 imports
eth/[keys, p2p], eth/common/utils,
eth/p2p/[enode, whispernodes],
../v1/protocol/waku_protocol,
./utils/nat,
../v1/node/rpc/wakusim,
../v1/node/waku_helpers,
# Waku v2 imports
libp2p/crypto/crypto,
../v2/node/wakunode2,
../v2/node/rpc/wakurpc,
# Common cli config
./config_bridge
const clientIdV1 = "nim-waku v1 node"
proc startWakuV1(config: WakuNodeConf, rng: ref BrHmacDrbgContext):
EthereumNode =
let
(ipExt, _, _) = setupNat(config.nat, clientIdV1,
Port(config.devp2pTcpPort + config.portsShift),
Port(config.udpPort + config.portsShift))
# TODO: EthereumNode should have a better split of binding address and
# external address. Also, can't have different ports as it stands now.
address = if ipExt.isNone():
Address(ip: parseIpAddress("0.0.0.0"),
tcpPort: Port(config.devp2pTcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
else:
Address(ip: ipExt.get(),
tcpPort: Port(config.devp2pTcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
# Set-up node
var node = newEthereumNode(config.nodekeyv1, address, 1, nil, clientIdV1,
addAllCapabilities = false, rng = rng)
node.addCapability Waku # Always enable Waku protocol
# Set up the Waku configuration.
# This node is being set up as a bridge so it gets configured as a node with
# a full bloom filter so that it will receive and forward all messages.
# TODO: What is the PoW setting now?
let wakuConfig = WakuConfig(powRequirement: config.wakuPow,
bloom: some(fullBloom()), isLightNode: false,
maxMsgSize: waku_protocol.defaultMaxMsgSize,
topics: none(seq[waku_protocol.Topic]))
node.configureWaku(wakuConfig)
# Optionally direct connect with a set of nodes
if config.staticnodesv1.len > 0: connectToNodes(node, config.staticnodesv1)
elif config.fleetv1 == prod: connectToNodes(node, WhisperNodes)
elif config.fleetv1 == staging: connectToNodes(node, WhisperNodesStaging)
elif config.fleetv1 == test: connectToNodes(node, WhisperNodesTest)
let connectedFut = node.connectToNetwork(@[],
true, # Always enable listening
false # Disable discovery (only discovery v4 is currently supported)
)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:
if connectedFut.failed:
fatal "connectToNetwork failed", msg = connectedFut.readError.msg
quit(1)
return node
proc startWakuV2(config: WakuNodeConf): Future[WakuNode] {.async.} =
let
(extIp, extTcpPort, _) = setupNat(config.nat, clientId,
Port(uint16(config.libp2pTcpPort) + config.portsShift),
Port(uint16(config.udpPort) + config.portsShift))
node = WakuNode.init(config.nodeKeyv2, config.listenAddress,
Port(uint16(config.libp2pTcpPort) + config.portsShift), extIp, extTcpPort)
await node.start()
if config.store:
mountStore(node)
if config.filter:
mountFilter(node)
if config.relay:
waitFor mountRelay(node, config.topics.split(" "))
if config.staticnodesv2.len > 0:
waitFor connectToNodes(node, config.staticnodesv2)
if config.storenode != "":
setStorePeer(node, config.storenode)
if config.filternode != "":
setFilterPeer(node, config.filternode)
return node
when isMainModule:
let
rng = keys.newRng()
let conf = WakuNodeConf.load()
if conf.logLevel != LogLevel.NONE:
setLogLevel(conf.logLevel)
let
nodev1 = startWakuV1(conf, rng)
nodev2 = waitFor startWakuV2(conf)
if conf.rpc:
let ta = initTAddress(conf.rpcAddress,
Port(conf.rpcPort + conf.portsShift))
var rpcServer = newRpcHttpServer([ta])
# Waku v1 RPC
# TODO: Commented out the Waku v1 RPC calls as there is a conflict caused by
# identically named RPC calls between v1 and v2
# let keys = newKeyStorage()
# setupWakuRPC(nodev1, keys, rpcServer, rng)
setupWakuSimRPC(nodev1, rpcServer)
# Waku v2 rpc
setupWakuRPC(nodev2, rpcServer)
rpcServer.start()
when defined(insecure):
if conf.metricsServer:
let
address = conf.metricsServerAddress
port = conf.metricsServerPort + conf.portsShift
info "Starting metrics HTTP server", address, port
metrics.startHttpServer($address, Port(port))
runForever()

156
waku/v1/README.md Normal file
View File

@ -0,0 +1,156 @@
# Waku v1
This folder contains code related to Waku v1, both as a node and as a protocol.
## Introduction
This is a Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html) and a cli application `wakunode` that allows you to run a Waku enabled node from the command line.
For supported specification details see [here](#spec-support).
Additionally, the original Whisper (EIP-627) protocol can also be enabled, as can
an experimental Whisper - Waku bridging option.
The underlying transport protocol is [rlpx + devp2p](https://github.com/ethereum/devp2p/blob/master/rlpx.md) and the [nim-eth](https://github.com/status-im/nim-eth) implementation is used.
## How to Build & Run
All of the below commands should be executed at the root level, i.e. `cd ../..`.
### Prerequisites
* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer.
* PCRE
More information on the installation of these can be found [here](https://github.com/status-im/nimbus#prerequisites).
### Wakunode
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make wakunode1
# See available command line options
./build/wakunode --help
# Connect the client directly with the Status test fleet
./build/wakunode --log-level:debug --discovery:off --fleet:test --log-metrics
```
### Waku v1 Protocol Test Suite
```bash
# Run all the Waku v1 tests
make test1
```
You can also run a specific test (and alter compile options as you want):
```bash
# Get a shell with the right environment variables set
./env.sh bash
# Run a specific test
nim c -r ./tests/v1/test_waku_connect.nim
```
### Waku v1 Protocol Example
There is a more basic example, more limited in features and configuration than
the `wakunode`, located in `examples/v1/example.nim`.
More information on how to run this example can be found in its
[readme](../../examples/v1/README.md).
### Waku Quick Simulation
One can set up several nodes, get them connected and then instruct them via the
JSON-RPC interface. This can be done via e.g. web3.js, nim-web3 (needs to be
updated) or simply curl your way out.
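For example, assuming a node was started with `--rpc` on the default port (8545, adjusted by `--ports-shift`), its protocol version can be queried directly:
```bash
# Query the Waku protocol version over JSON-RPC
curl -s -X POST -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","method":"waku_version","params":[],"id":1}' \
  http://localhost:8545
```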
The JSON-RPC interface is currently the same as the one of Whisper. The only
difference is the addition of broadcasting the topic interest when a filter
with a certain set of topics is subscribed.
The quick simulation uses this approach, `start_network` launches a set of
`wakunode`s, and `quicksim` instructs the nodes through RPC calls.
Example of how to build and run:
```bash
# Build wakunode + quicksim with metrics enabled
make NIMFLAGS="-d:insecure" sim1
# Start the simulation nodes, this currently requires multitail to be installed
./build/start_network --topology:FullMesh --amount:6 --test-node-peers:2
# In another shell run
./build/quicksim
```
The `start_network` tool will also provide a `prometheus.yml` with targets
set to all simulation nodes that are started. This way you can easily start
prometheus with this config, e.g.:
```bash
cd ./metrics/prometheus
prometheus
```
A Grafana dashboard containing the example dashboard for each simulation node
is also generated and can be imported in case you have Grafana running.
This dashboard can be found at `./metrics/waku-sim-all-nodes-grafana-dashboard.json`
To read more details about metrics, see the [next](#using-metrics) section.
## Using Metrics
Metrics are available for valid envelopes and dropped envelopes.
To compile in an HTTP endpoint for accessing the metrics we need to provide the
`insecure` flag:
```bash
make NIMFLAGS="-d:insecure" wakunode1
./build/wakunode --metrics-server
```
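With the node running, the Prometheus-formatted metrics can then be fetched over HTTP (assuming the default `--metrics-server-port` of 8008 and no ports shift):
```bash
# Inspect the current metrics in Prometheus text format
curl -s http://localhost:8008/metrics
```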
Ensure your Prometheus config `prometheus.yml` contains the targets you care about, e.g.:
```yaml
scrape_configs:
- job_name: "waku"
static_configs:
- targets: ['localhost:8008', 'localhost:8009', 'localhost:8010']
```
For visualisation, similar steps can be used as is written down for Nimbus
[here](https://github.com/status-im/nimbus#metric-visualisation).
There is a similar example dashboard that includes visualisation of the
envelopes available at `metrics/waku-grafana-dashboard.json`.
## Spec support
*This section last updated April 21, 2020*
This client of Waku is spec compliant with [Waku spec v1.0.0](https://specs.vac.dev/waku/waku.html).
It doesn't yet implement the following recommended features:
- No support for rate limiting
- No support for DNS discovery to find Waku nodes
- It doesn't disconnect a peer if it receives a message before a Status message
- No support for negotiation with peer supporting multiple versions via Devp2p capabilities in `Hello` packet
Additionally it makes the following choices:
- It doesn't send message confirmations
- It has partial support for accounting:
  - Accounting of total resource usage and total circulated envelopes is done through metrics, but no accounting is done for individual peers.
## Docker Image
You can create a Docker image using:
```bash
make docker-image
docker run --rm -it statusteam/nim-waku:latest --help
```
The target will be a docker image with `wakunode`, which is the Waku v1 node.

3
waku/v1/node/README.md Normal file
View File

@ -0,0 +1,3 @@
# Waku Node v1
This folder contains code related to running a `wakunode` process. The main entrypoint is the `wakunode` file.

169
waku/v1/node/config.nim Normal file
View File

@ -0,0 +1,169 @@
import
confutils/defs, chronicles, chronos, eth/keys
type
Fleet* = enum
none
prod
staging
test
WakuNodeCmd* = enum
noCommand
genNodekey
WakuNodeConf* = object
logLevel* {.
desc: "Sets the log level."
defaultValue: LogLevel.INFO
name: "log-level" .}: LogLevel
case cmd* {.
command
defaultValue: noCommand .}: WakuNodeCmd
of noCommand:
tcpPort* {.
desc: "TCP listening port."
defaultValue: 30303
name: "tcp-port" .}: uint16
udpPort* {.
desc: "UDP listening port."
defaultValue: 30303
name: "udp-port" .}: uint16
portsShift* {.
desc: "Add a shift to all port numbers."
defaultValue: 0
name: "ports-shift" .}: uint16
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>."
defaultValue: "any" .}: string
discovery* {.
desc: "Enable/disable discovery v4."
defaultValue: true
name: "discovery" .}: bool
noListen* {.
desc: "Disable listening for incoming peers."
defaultValue: false
name: "no-listen" .}: bool
fleet* {.
desc: "Select the fleet to connect to."
defaultValue: Fleet.none
name: "fleet" .}: Fleet
bootnodes* {.
desc: "Enode URL to bootstrap P2P discovery with. Argument may be repeated."
name: "bootnode" .}: seq[string]
staticnodes* {.
desc: "Enode URL to directly connect with. Argument may be repeated."
name: "staticnode" .}: seq[string]
whisper* {.
desc: "Enable the Whisper protocol."
defaultValue: false
name: "whisper" .}: bool
whisperBridge* {.
desc: "Enable the Whisper protocol and bridge with Waku protocol."
defaultValue: false
name: "whisper-bridge" .}: bool
lightNode* {.
desc: "Run as light node (no message relay).",
defaultValue: false
name: "light-node" .}: bool
wakuTopicInterest* {.
desc: "Run as node with a topic-interest",
defaultValue: false
name: "waku-topic-interest" .}: bool
wakuPow* {.
desc: "PoW requirement of Waku node.",
defaultValue: 0.002
name: "waku-pow" .}: float64
nodekey* {.
desc: "P2P node private key as hex.",
# TODO: can the rng be passed in somehow via Load?
defaultValue: KeyPair.random(keys.newRng()[])
name: "nodekey" .}: KeyPair
# TODO: Add nodekey file option
bootnodeOnly* {.
desc: "Run only as discovery bootnode."
defaultValue: false
name: "bootnode-only" .}: bool
rpc* {.
desc: "Enable Waku RPC server.",
defaultValue: false
name: "rpc" .}: bool
rpcAddress* {.
desc: "Listening address of the RPC server.",
defaultValue: parseIpAddress("127.0.0.1")
name: "rpc-address" .}: IpAddress
rpcPort* {.
desc: "Listening port of the RPC server.",
defaultValue: 8545
name: "rpc-port" .}: uint16
metricsServer* {.
desc: "Enable the metrics server."
defaultValue: false
name: "metrics-server" .}: bool
metricsServerAddress* {.
desc: "Listening address of the metrics server."
defaultValue: parseIpAddress("127.0.0.1")
name: "metrics-server-address" .}: IpAddress
metricsServerPort* {.
desc: "Listening HTTP port of the metrics server."
defaultValue: 8008
name: "metrics-server-port" .}: uint16
logMetrics* {.
desc: "Enable metrics logging."
defaultValue: false
name: "log-metrics" .}: bool
logAccounting* {.
desc: "Enable peer accounting logging."
defaultValue: false
name: "log-accounting" .}: bool
# TODO:
# - discv5 + topic register
# - mailserver functionality
of genNodekey:
discard
proc parseCmdArg*(T: type KeyPair, p: TaintedString): T =
try:
let privkey = PrivateKey.fromHex(string(p)).tryGet()
result = privkey.toKeyPair()
except CatchableError:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type KeyPair, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type IpAddress, p: TaintedString): T =
try:
result = parseIpAddress(p)
except CatchableError:
raise newException(ConfigurationError, "Invalid IP address")
proc completeCmdArg*(T: type IpAddress, val: TaintedString): seq[string] =
return @[]

4
waku/v1/node/nim.cfg Normal file
View File

@ -0,0 +1,4 @@
-d:chronicles_line_numbers
-d:"chronicles_runtime_filtering=on"
-d:nimDebugDlOpen

76
waku/v1/node/quicksim.nim Normal file
View File

@ -0,0 +1,76 @@
import
os, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
eth/common as eth_common, eth/keys,
../protocol/waku_protocol, ./rpc/[hexstrings, rpc_types],
options as what # TODO: Huh? Redefinition?
from os import DirSep
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = &"{sourceDir}{DirSep}rpc{DirSep}wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
const topicAmount = 100
let
trafficNode = newRpcHttpClient()
lightNode = newRpcHttpClient()
lightNode2 = newRpcHttpClient()
waitFor lightNode.connect("localhost", Port(8545))
waitFor lightNode2.connect("localhost", Port(8546))
waitFor trafficNode.connect("localhost", Port(8548))
proc generateTopics(amount = topicAmount): seq[waku_protocol.Topic] =
var topic: waku_protocol.Topic
for i in 0..<amount:
if randomBytes(topic) != 4:
raise newException(ValueError, "Generation of random topic failed.")
result.add(topic)
let
symKey = "0x0000000000000000000000000000000000000000000000000000000000000001"
topics = generateTopics()
symKeyID = waitFor lightNode.waku_addSymKey(symKey)
options = WakuFilterOptions(symKeyID: some(symKeyID),
topics: some(topics))
filterID = waitFor lightNode.waku_newMessageFilter(options)
symKeyID2 = waitFor lightNode2.waku_addSymKey(symKey)
options2 = WakuFilterOptions(symKeyID: some(symKeyID2),
topics: some(topics))
filterID2 = waitFor lightNode2.waku_newMessageFilter(options2)
symkeyID3 = waitFor trafficNode.waku_addSymKey(symKey)
var message = WakuPostMessage(symKeyID: some(symkeyID3),
ttl: 30,
topic: some(topics[0]),
payload: "0x45879632".HexDataStr,
powTime: 1.0,
powTarget: 0.002)
info "Posting envelopes on all subscribed topics"
for i in 0..<topicAmount:
message.topic = some(topics[i])
discard waitFor trafficNode.waku_post(message)
# Check if the subscription for the topics works
waitFor sleepAsync(1000.milliseconds) # This is a bit brittle
let
messages = waitFor lightNode.waku_getFilterMessages(filterID)
messages2 = waitFor lightNode2.waku_getFilterMessages(filterID2)
if messages.len != topicAmount or messages2.len != topicAmount:
error "Light node did not receive envelopes on all subscribed topics",
lightnode1=messages.len, lightnode2=messages2.len
quit 1
info "Received envelopes on all subscribed topics"
# Generate test traffic on node
discard waitFor trafficNode.wakusim_generateRandomTraffic(10_000)
info "Started random traffic generation"

View File

@ -0,0 +1,225 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## This module implements the Ethereum hexadecimal string formats for JSON
## See: https://github.com/ethereum/wiki/wiki/JSON-RPC#hex-value-encoding
#[
Note:
The following types are converted to hex strings when marshalled to JSON:
* Hash256
* UInt256
* seq[byte]
* openArray[seq]
* PublicKey
* PrivateKey
* SymKey
* Topic
* Bytes
]#
import
stint, stew/byteutils, eth/[keys, rlp], eth/common/eth_types,
../../protocol/waku_protocol
type
HexDataStr* = distinct string
Identifier* = distinct string # 32 bytes, no 0x prefix!
HexStrings = HexDataStr | Identifier
# Hex validation
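# For illustration, the validators defined below behave as follows (values
# chosen arbitrarily):
#   "0x1234".isValidHexData      -> true  (0x prefix, even number of hex digits)
#   "0x01234".isValidHexQuantity -> false (leading zero not allowed)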
template hasHexHeader(value: string): bool =
if value.len >= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true
else: false
template isHexChar(c: char): bool =
if c notin {'0'..'9'} and
c notin {'a'..'f'} and
c notin {'A'..'F'}: false
else: true
func isValidHexQuantity*(value: string): bool =
if not value.hasHexHeader:
return false
# No leading zeros (but allow 0x0)
if value.len < 3 or (value.len > 3 and value[2] == '0'): return false
for i in 2 ..< value.len:
let c = value[i]
if not c.isHexChar:
return false
return true
func isValidHexData*(value: string, header = true): bool =
if header and not value.hasHexHeader:
return false
# Must be even number of digits
if value.len mod 2 != 0: return false
# Leading zeros are allowed
for i in 2 ..< value.len:
let c = value[i]
if not c.isHexChar:
return false
return true
template isValidHexData(value: string, hexLen: int, header = true): bool =
value.len == hexLen and value.isValidHexData(header)
func isValidIdentifier*(value: string): bool =
# 32 bytes for Whisper ID, no 0x prefix
result = value.isValidHexData(64, false)
func isValidPublicKey*(value: string): bool =
# 65 bytes for Public Key plus 1 byte for 0x prefix
result = value.isValidHexData(132)
func isValidPrivateKey*(value: string): bool =
# 32 bytes for Private Key plus 1 byte for 0x prefix
result = value.isValidHexData(66)
func isValidSymKey*(value: string): bool =
# 32 bytes for Symmetric Key plus 1 byte for 0x prefix
result = value.isValidHexData(66)
func isValidHash256*(value: string): bool =
# 32 bytes for Hash256 plus 1 byte for 0x prefix
result = value.isValidHexData(66)
func isValidTopic*(value: string): bool =
# 4 bytes for Topic plus 1 byte for 0x prefix
result = value.isValidHexData(10)
const
SInvalidData = "Invalid hex data format for Ethereum"
proc validateHexData*(value: string) {.inline.} =
if unlikely(not value.isValidHexData):
raise newException(ValueError, SInvalidData & ": " & value)
# Initialisation
proc hexDataStr*(value: string): HexDataStr {.inline.} =
value.validateHexData
result = value.HexDataStr
# Converters for use in RPC
import json
from json_rpc/rpcserver import expect
proc `%`*(value: HexStrings): JsonNode =
result = %(value.string)
# Overloads to support expected representation of hex data
proc `%`*(value: Hash256): JsonNode =
#result = %("0x" & $value) # More clean but no lowercase :(
result = %("0x" & value.data.toHex)
proc `%`*(value: UInt256): JsonNode =
result = %("0x" & value.toString(16))
proc `%`*(value: PublicKey): JsonNode =
result = %("0x04" & $value)
proc `%`*(value: PrivateKey): JsonNode =
result = %("0x" & $value)
proc `%`*(value: SymKey): JsonNode =
result = %("0x" & value.toHex)
proc `%`*(value: waku_protocol.Topic): JsonNode =
result = %("0x" & value.toHex)
proc `%`*(value: seq[byte]): JsonNode =
result = %("0x" & value.toHex)
# Helpers for the fromJson procs
proc toPublicKey*(key: string): PublicKey {.inline.} =
result = PublicKey.fromHex(key[4 .. ^1]).tryGet()
proc toPrivateKey*(key: string): PrivateKey {.inline.} =
result = PrivateKey.fromHex(key[2 .. ^1]).tryGet()
proc toSymKey*(key: string): SymKey {.inline.} =
hexToByteArray(key[2 .. ^1], result)
proc toTopic*(topic: string): waku_protocol.Topic {.inline.} =
hexToByteArray(topic[2 .. ^1], result)
# Marshalling from JSON to Nim types that includes format checking
func invalidMsg(name: string): string = "When marshalling from JSON, parameter \"" & name & "\" is not valid"
proc fromJson*(n: JsonNode, argName: string, result: var HexDataStr) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHexData:
raise newException(ValueError, invalidMsg(argName) & " as Ethereum data \"" & hexStr & "\"")
result = hexStr.hexDataStr
proc fromJson*(n: JsonNode, argName: string, result: var Identifier) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidIdentifier:
raise newException(ValueError, invalidMsg(argName) & " as a identifier \"" & hexStr & "\"")
result = hexStr.Identifier
proc fromJson*(n: JsonNode, argName: string, result: var UInt256) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not (hexStr.len <= 66 and hexStr.isValidHexQuantity):
raise newException(ValueError, invalidMsg(argName) & " as a UInt256 \"" & hexStr & "\"")
result = readUintBE[256](hexToPaddedByteArray[32](hexStr))
proc fromJson*(n: JsonNode, argName: string, result: var PublicKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidPublicKey:
raise newException(ValueError, invalidMsg(argName) & " as a public key \"" & hexStr & "\"")
result = hexStr.toPublicKey
proc fromJson*(n: JsonNode, argName: string, result: var PrivateKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidPrivateKey:
raise newException(ValueError, invalidMsg(argName) & " as a private key \"" & hexStr & "\"")
result = hexStr.toPrivateKey
proc fromJson*(n: JsonNode, argName: string, result: var SymKey) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidSymKey:
raise newException(ValueError, invalidMsg(argName) & " as a symmetric key \"" & hexStr & "\"")
result = toSymKey(hexStr)
proc fromJson*(n: JsonNode, argName: string, result: var waku_protocol.Topic) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidTopic:
raise newException(ValueError, invalidMsg(argName) & " as a topic \"" & hexStr & "\"")
result = toTopic(hexStr)
# The following procs are currently required only for testing: the `createRpcSigs`
# macro requires them, as it converts the JSON results back to the original Nim
# types and needs the `fromJson` calls for those specific Nim types to do so
proc fromJson*(n: JsonNode, argName: string, result: var seq[byte]) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHexData:
raise newException(ValueError, invalidMsg(argName) & " as a hex data \"" & hexStr & "\"")
result = hexToSeqByte(hexStr)
proc fromJson*(n: JsonNode, argName: string, result: var Hash256) =
n.kind.expect(JString, argName)
let hexStr = n.getStr()
if not hexStr.isValidHash256:
raise newException(ValueError, invalidMsg(argName) & " as a Hash256 \"" & hexStr & "\"")
hexToByteArray(hexStr, result.data)

View File

@ -0,0 +1,22 @@
#
# Nimbus
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
import tables, eth/keys, eth/p2p/rlpx_protocols/whisper/whisper_types
type
KeyStorage* = ref object
asymKeys*: Table[string, KeyPair]
symKeys*: Table[string, SymKey]
KeyGenerationError* = object of CatchableError
proc newKeyStorage*(): KeyStorage =
new(result)
result.asymKeys = initTable[string, KeyPair]()
result.symKeys = initTable[string, SymKey]()

View File

@ -0,0 +1,58 @@
import
hexstrings, options, eth/[keys, rlp],
../../protocol/waku_protocol
#[
Notes:
* Some of the types support 'null' when there is no appropriate value.
To allow for this, you can use Option[T] or use refs so the JSON transform can convert to `JNull`.
* Parameter objects from users must have their data verified, so they will use EthAddressStr instead of EthAddress, for example
* Objects returned to the user can use native Waku types, where hexstrings provides converters to hex strings.
This is because returned arrays in JSON are
a) not an efficient use of space
b) not the format the user expects (for example addresses are expected to be hex strings prefixed by "0x")
]#
type
WakuInfo* = object
# Returned to user
minPow*: float64 # Current minimum PoW requirement.
# TODO: may be uint32
maxMessageSize*: uint64 # Current message size limit in bytes.
memory*: int # Memory size of the floating messages in bytes.
messages*: int # Number of floating messages.
WakuFilterOptions* = object
# Parameter from user
symKeyID*: Option[Identifier] # ID of symmetric key for message decryption.
privateKeyID*: Option[Identifier] # ID of private (asymmetric) key for message decryption.
sig*: Option[PublicKey] # (Optional) Public key of the signature.
minPow*: Option[float64] # (Optional) Minimal PoW requirement for incoming messages.
topics*: Option[seq[waku_protocol.Topic]] # (Optional when asym key): Array of possible topics (or partial topics).
allowP2P*: Option[bool] # (Optional) Indicates if this filter allows processing of direct peer-to-peer messages.
WakuFilterMessage* = object
# Returned to user
sig*: Option[PublicKey] # Public key that signed this message.
recipientPublicKey*: Option[PublicKey] # The recipient's public key.
ttl*: uint64 # Time-to-live in seconds.
timestamp*: uint64 # Unix timestamp of the message generation.
topic*: waku_protocol.Topic # 4 Bytes: Message topic.
payload*: seq[byte] # Decrypted payload.
padding*: seq[byte] # (Optional) Padding (byte array of arbitrary length).
pow*: float64 # Proof of work value.
hash*: Hash # Hash of the enveloped message.
WakuPostMessage* = object
# Parameter from user
symKeyID*: Option[Identifier] # ID of symmetric key for message encryption.
pubKey*: Option[PublicKey] # Public key for message encryption.
sig*: Option[Identifier] # (Optional) ID of the signing key.
ttl*: uint64 # Time-to-live in seconds.
topic*: Option[waku_protocol.Topic] # Message topic (mandatory when key is symmetric).
payload*: HexDataStr # Payload to be encrypted.
padding*: Option[HexDataStr] # (Optional) Padding (byte array of arbitrary length).
powTime*: float64 # Maximal time in seconds to be spent on proof of work.
powTarget*: float64 # Minimal PoW target required for this message.
# TODO: EnodeStr
targetPeer*: Option[string] # (Optional) Peer ID (for peer-to-peer message only).

365
waku/v1/node/rpc/waku.nim Normal file
View File

@ -0,0 +1,365 @@
import
json_rpc/rpcserver, tables, options, sequtils,
eth/[common, rlp, keys, p2p],
nimcrypto/[sysrand, hmac, sha2, pbkdf2],
rpc_types, hexstrings, key_storage,
../../protocol/waku_protocol
from stew/byteutils import hexToSeqByte, hexToByteArray
# Blatant copy of Whisper RPC but for the Waku protocol
proc setupWakuRPC*(node: EthereumNode, keys: KeyStorage, rpcsrv: RpcServer,
rng: ref BrHmacDrbgContext) =
rpcsrv.rpc("waku_version") do() -> string:
## Returns string of the current Waku protocol version.
result = wakuVersionStr
rpcsrv.rpc("waku_info") do() -> WakuInfo:
## Returns diagnostic information about the Waku node.
let config = node.protocolState(Waku).config
result = WakuInfo(minPow: config.powRequirement,
maxMessageSize: config.maxMsgSize,
memory: 0,
messages: 0)
# TODO: uint32 instead of uint64 is OK here, but needs to be added in json_rpc
rpcsrv.rpc("waku_setMaxMessageSize") do(size: uint64) -> bool:
## Sets the maximal message size allowed by this node.
## Incoming and outgoing messages with a larger size will be rejected.
## Waku message size can never exceed the limit imposed by the underlying
## P2P protocol (10 Mb).
##
## size: Message size in bytes.
##
## Returns true on success and an error on failure.
result = node.setMaxMessageSize(size.uint32)
if not result:
raise newException(ValueError, "Invalid size")
rpcsrv.rpc("waku_setMinPoW") do(pow: float) -> bool:
## Sets the minimal PoW required by this node.
##
## pow: The new PoW requirement.
##
## Returns true on success and an error on failure.
# Note: `setPowRequirement` does not raise on failures of sending the update
# to the peers. Hence in theory this should not cause errors.
await node.setPowRequirement(pow)
result = true
# TODO: change string in to ENodeStr with extra checks
rpcsrv.rpc("waku_markTrustedPeer") do(enode: string) -> bool:
## Marks specific peer trusted, which will allow it to send historic
## (expired) messages.
## Note: This function does not add new nodes; the node needs to exist as
## a peer.
##
## enode: Enode of the trusted peer.
##
## Returns true on success and an error on failure.
# TODO: It will now require an enode://pubkey@ip:port uri
# could also accept only the pubkey (like geth)?
let peerNode = newNode(enode)
result = node.setPeerTrusted(peerNode.id)
if not result:
raise newException(ValueError, "Not a peer")
rpcsrv.rpc("waku_newKeyPair") do() -> Identifier:
## Generates a new public and private key pair for message decryption and
## encryption.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.asymKeys.add(result.string, KeyPair.random(rng[]))
rpcsrv.rpc("waku_addPrivateKey") do(key: PrivateKey) -> Identifier:
## Stores the key pair, and returns its ID.
##
## key: Private key as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.asymKeys.add(result.string, key.toKeyPair())
rpcsrv.rpc("waku_deleteKeyPair") do(id: Identifier) -> bool:
## Deletes the specified key if it exists.
##
## id: Identifier of key pair
##
## Returns true on success and an error on failure.
var unneeded: KeyPair
result = keys.asymKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_hasKeyPair") do(id: Identifier) -> bool:
## Checks if the Waku node has a private key of a key pair matching the
## given ID.
##
## id: Identifier of key pair
##
## Returns (true or false) on success and an error on failure.
result = keys.asymkeys.hasKey(id.string)
rpcsrv.rpc("waku_getPublicKey") do(id: Identifier) -> PublicKey:
## Returns the public key for identity ID.
##
## id: Identifier of key pair
##
## Returns public key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].pubkey
rpcsrv.rpc("waku_getPrivateKey") do(id: Identifier) -> PrivateKey:
## Returns the private key for identity ID.
##
## id: Identifier of key pair
##
## Returns private key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.asymkeys[id.string].seckey
rpcsrv.rpc("waku_newSymKey") do() -> Identifier:
## Generates a random symmetric key and stores it under an ID, which is then
## returned. Can be used for encrypting and decrypting messages where the key is
## known to both parties.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
var key: SymKey
if randomBytes(key) != key.len:
raise newException(KeyGenerationError, "Failed generating key")
keys.symKeys.add(result.string, key)
rpcsrv.rpc("waku_addSymKey") do(key: SymKey) -> Identifier:
## Stores the key, and returns its ID.
##
## key: The raw key for symmetric encryption as hex bytes.
##
## Returns key identifier on success and an error on failure.
result = generateRandomID(rng[]).Identifier
keys.symKeys.add(result.string, key)
rpcsrv.rpc("waku_generateSymKeyFromPassword") do(password: string) -> Identifier:
## Generates the key from password, stores it, and returns its ID.
##
## password: Password.
##
## Returns key identifier on success and an error on failure.
## Warning: an empty string is used as salt because the shh RPC API does not
## allow for passing a salt. A very good password is necessary (calculate
## yourself what that means :))
var ctx: HMAC[sha256]
var symKey: SymKey
if pbkdf2(ctx, password, "", 65356, symKey) != sizeof(SymKey):
raise newException(KeyGenerationError, "Failed generating key")
result = generateRandomID(rng[]).Identifier
keys.symKeys.add(result.string, symKey)
rpcsrv.rpc("waku_hasSymKey") do(id: Identifier) -> bool:
## Returns true if there is a key associated with the name string.
## Otherwise, returns false.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
result = keys.symkeys.hasKey(id.string)
rpcsrv.rpc("waku_getSymKey") do(id: Identifier) -> SymKey:
## Returns the symmetric key associated with the given ID.
##
## id: Identifier of key.
##
## Returns Raw key on success and an error on failure.
# Note: key not found exception as error in case not existing
result = keys.symkeys[id.string]
rpcsrv.rpc("waku_deleteSymKey") do(id: Identifier) -> bool:
## Deletes the key associated with the name string if it exists.
##
## id: Identifier of key.
##
## Returns (true or false) on success and an error on failure.
var unneeded: SymKey
result = keys.symKeys.take(id.string, unneeded)
if not result:
raise newException(ValueError, "Invalid key id")
rpcsrv.rpc("waku_subscribe") do(id: string,
options: WakuFilterOptions) -> Identifier:
## Creates and registers a new subscription to receive notifications for
## inbound Waku messages. Returns the ID of the newly created
## subscription.
##
## id: identifier of function call. In case of Waku must contain the
## value "messages".
## options: WakuFilterOptions
##
## Returns the subscription ID on success, the error on failure.
# TODO: implement subscriptions, only for WS & IPC?
discard
rpcsrv.rpc("waku_unsubscribe") do(id: Identifier) -> bool:
## Cancels and removes an existing subscription.
##
## id: Subscription identifier
##
## Returns true on success, the error on failure
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
proc validateOptions[T,U,V](asym: Option[T], sym: Option[U], topic: Option[V]) =
if (asym.isSome() and sym.isSome()) or (asym.isNone() and sym.isNone()):
raise newException(ValueError,
"Either privateKeyID/pubKey or symKeyID must be present")
if asym.isNone() and topic.isNone():
raise newException(ValueError, "Topic mandatory with symmetric key")
rpcsrv.rpc("waku_newMessageFilter") do(options: WakuFilterOptions) -> Identifier:
## Create a new filter within the node. This filter can be used to poll for
## new messages that match the set of criteria.
##
## options: WakuFilterOptions
##
## Returns filter identifier on success, error on failure
# Check if either symKeyID or privateKeyID is present, and not both
# Check if there are Topics when symmetric key is used
validateOptions(options.privateKeyID, options.symKeyID, options.topics)
var
src: Option[PublicKey]
privateKey: Option[PrivateKey]
symKey: Option[SymKey]
topics: seq[waku_protocol.Topic]
powReq: float64
allowP2P: bool
src = options.sig
if options.privateKeyID.isSome():
privateKey = some(keys.asymKeys[options.privateKeyID.get().string].seckey)
if options.symKeyID.isSome():
symKey = some(keys.symKeys[options.symKeyID.get().string])
if options.minPow.isSome():
powReq = options.minPow.get()
if options.topics.isSome():
topics = options.topics.get()
if options.allowP2P.isSome():
allowP2P = options.allowP2P.get()
let filter = initFilter(src, privateKey, symKey, topics, powReq, allowP2P)
result = node.subscribeFilter(filter).Identifier
# TODO: Should we do this here "automatically" or separate it in another
# RPC call? Is there a use case for that?
# Same could be said about bloomfilter, except that there is a use case
# there to have a full node no matter what message filters.
# Could also be moved to waku_protocol.nim
let config = node.protocolState(Waku).config
if config.topics.isSome():
try:
# TODO: an addTopics call would probably be more useful
let result = await node.setTopicInterest(config.topics.get().concat(filter.topics))
if not result:
raise newException(ValueError, "Too many topics")
except CatchableError:
trace "setTopics error occured"
elif config.isLightNode:
try:
await node.setBloomFilter(node.filtersToBloom())
except CatchableError:
trace "setBloomFilter error occured"
rpcsrv.rpc("waku_deleteMessageFilter") do(id: Identifier) -> bool:
## Uninstall a message filter in the node.
##
## id: Filter identifier as returned when the filter was created.
##
## Returns true on success, error on failure.
result = node.unsubscribeFilter(id.string)
if not result:
raise newException(ValueError, "Invalid filter id")
rpcsrv.rpc("waku_getFilterMessages") do(id: Identifier) -> seq[WakuFilterMessage]:
## Retrieve messages that match the filter criteria and are received between
## the last time this function was called and now.
##
## id: ID of filter that was created with `waku_newMessageFilter`.
##
## Returns array of messages on success and an error on failure.
let messages = node.getFilterMessages(id.string)
for msg in messages:
result.add WakuFilterMessage(
sig: msg.decoded.src,
recipientPublicKey: msg.dst,
ttl: msg.ttl,
topic: msg.topic,
timestamp: msg.timestamp,
payload: msg.decoded.payload,
# Note: waku_protocol padding is an Option as there is the
# possibility of 0 padding in case of custom padding.
padding: msg.decoded.padding.get(@[]),
pow: msg.pow,
hash: msg.hash)
rpcsrv.rpc("waku_post") do(message: WakuPostMessage) -> bool:
## Creates a Waku message and injects it into the network for
## distribution.
##
## message: Waku message to post.
##
## Returns true on success and an error on failure.
# Check if either symKeyID or pubKey is present, and not both
# Check if there is a Topic when symmetric key is used
validateOptions(message.pubKey, message.symKeyID, message.topic)
var
sigPrivKey: Option[PrivateKey]
symKey: Option[SymKey]
topic: waku_protocol.Topic
padding: Option[seq[byte]]
targetPeer: Option[NodeId]
if message.sig.isSome():
sigPrivKey = some(keys.asymKeys[message.sig.get().string].seckey)
if message.symKeyID.isSome():
symKey = some(keys.symKeys[message.symKeyID.get().string])
# Note: If no topic is given, it will default to 0x00000000
if message.topic.isSome():
topic = message.topic.get()
if message.padding.isSome():
padding = some(hexToSeqByte(message.padding.get().string))
if message.targetPeer.isSome():
targetPeer = some(newNode(message.targetPeer.get()).id)
result = node.postMessage(message.pubKey,
symKey,
sigPrivKey,
ttl = message.ttl.uint32,
topic = topic,
payload = hexToSeqByte(message.payload.string),
padding = padding,
powTime = message.powTime,
powTarget = message.powTarget,
targetPeer = targetPeer)
if not result:
raise newException(ValueError, "Message could not be posted")

View File

@ -0,0 +1,27 @@
proc waku_version(): string
proc waku_info(): WakuInfo
proc waku_setMaxMessageSize(size: uint64): bool
proc waku_setMinPoW(pow: float): bool
proc waku_markTrustedPeer(enode: string): bool
proc waku_newKeyPair(): Identifier
proc waku_addPrivateKey(key: string): Identifier
proc waku_deleteKeyPair(id: Identifier): bool
proc waku_hasKeyPair(id: Identifier): bool
proc waku_getPublicKey(id: Identifier): PublicKey
proc waku_getPrivateKey(id: Identifier): PrivateKey
proc waku_newSymKey(): Identifier
proc waku_addSymKey(key: string): Identifier
proc waku_generateSymKeyFromPassword(password: string): Identifier
proc waku_hasSymKey(id: Identifier): bool
proc waku_getSymKey(id: Identifier): SymKey
proc waku_deleteSymKey(id: Identifier): bool
proc waku_newMessageFilter(options: WakuFilterOptions): Identifier
proc waku_deleteMessageFilter(id: Identifier): bool
proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage]
proc waku_post(message: WakuPostMessage): bool
proc wakusim_generateTraffic(amount: int): bool
proc wakusim_generateRandomTraffic(amount: int): bool

View File

@ -0,0 +1,31 @@
import
json_rpc/rpcserver, stew/endians2, nimcrypto/sysrand,
eth/[p2p, async_utils],
../../protocol/waku_protocol
proc generateTraffic(node: EthereumNode, amount = 100) {.async.} =
let payload = @[byte 0]
for i in 0..<amount:
discard waku_protocol.postMessage(node, ttl = 10,
topic = toBytesLE(i.uint32), payload = payload)
await sleepAsync(1.milliseconds)
proc generateRandomTraffic(node: EthereumNode, amount = 100) {.async.} =
var topic: array[4, byte]
let payload = @[byte 0]
for i in 0..<amount:
while randomBytes(topic) != 4:
discard
discard waku_protocol.postMessage(node, ttl = 10, topic = topic,
payload = payload)
await sleepAsync(1.milliseconds)
proc setupWakuSimRPC*(node: EthereumNode, rpcsrv: RpcServer) =
rpcsrv.rpc("wakusim_generateTraffic") do(amount: int) -> bool:
traceAsyncErrors node.generateTraffic(amount)
return true
rpcsrv.rpc("wakusim_generateRandomTraffic") do(amount: int) -> bool:
traceAsyncErrors node.generateRandomTraffic(amount)
return true

View File

@ -0,0 +1,198 @@
import
options, strformat, os, osproc, net, confutils, chronicles, json, strutils,
eth/keys, eth/p2p/enode
const
defaults ="--log-level:DEBUG --log-metrics --metrics-server --rpc"
wakuNodeBin = "build" / "wakunode"
metricsDir = "metrics"
portOffset = 2
type
NodeType = enum
FullNode = "",
LightNode = "--light-node:on",
Topology = enum
Star,
FullMesh,
DiscoveryBased # Whatever topology the discovery brings
WakuNetworkConf* = object
topology* {.
desc: "Set the network topology."
defaultValue: Star
name: "topology" .}: Topology
amount* {.
desc: "Amount of full nodes to be started."
defaultValue: 4
name: "amount" .}: int
testNodePeers* {.
desc: "Amount of peers a test node should connect to."
defaultValue: 1
name: "test-node-peers" .}: int
NodeInfo* = object
cmd: string
master: bool
enode: string
shift: int
label: string
proc initNodeCmd(nodeType: NodeType, shift: int, staticNodes: seq[string] = @[],
discovery = false, bootNodes: seq[string] = @[], topicInterest = false,
master = false, label: string): NodeInfo =
let
rng = keys.newRng()
keypair = KeyPair.random(rng[])
address = Address(ip: parseIpAddress("127.0.0.1"),
udpPort: (30303 + shift).Port, tcpPort: (30303 + shift).Port)
enode = ENode(pubkey: keypair.pubkey, address: address)
result.cmd = wakuNodeBin & " " & defaults & " "
result.cmd &= $nodeType & " "
result.cmd &= "--waku-topic-interest:" & $topicInterest & " "
result.cmd &= "--nodekey:" & $keypair.seckey & " "
result.cmd &= "--ports-shift:" & $shift & " "
if discovery:
result.cmd &= "--discovery:on" & " "
if bootNodes.len > 0:
for bootNode in bootNodes:
result.cmd &= "--bootnode:" & bootNode & " "
else:
result.cmd &= "--discovery:off" & " "
if staticNodes.len > 0:
for staticNode in staticNodes:
result.cmd &= "--staticnode:" & staticNode & " "
result.master = master
result.enode = $enode
result.shift = shift
result.label = label
debug "Node command created.", cmd=result.cmd
proc starNetwork(amount: int): seq[NodeInfo] =
let masterNode = initNodeCmd(FullNode, portOffset, master = true,
label = "master node")
result.add(masterNode)
for i in 1..<amount:
result.add(initNodeCmd(FullNode, portOffset + i, @[masterNode.enode],
label = "full node"))
proc fullMeshNetwork(amount: int): seq[NodeInfo] =
debug "amount", amount
for i in 0..<amount:
var staticnodes: seq[string]
for item in result:
staticnodes.add(item.enode)
result.add(initNodeCmd(FullNode, portOffset + i, staticnodes,
label = "full node"))
proc discoveryNetwork(amount: int): seq[NodeInfo] =
let bootNode = initNodeCmd(FullNode, portOffset, discovery = true,
master = true, label = "boot node")
result.add(bootNode)
for i in 1..<amount:
result.add(initNodeCmd(FullNode, portOffset + i, label = "full node",
discovery = true, bootNodes = @[bootNode.enode]))
proc generatePrometheusConfig(nodes: seq[NodeInfo], outputFile: string) =
var config = """
global:
scrape_interval: 1s
scrape_configs:
- job_name: "wakusim"
static_configs:"""
var count = 0
for node in nodes:
let port = 8008 + node.shift
config &= &"""
- targets: ['127.0.0.1:{port}']
labels:
node: '{count}'"""
count += 1
var (path, file) = splitPath(outputFile)
createDir(path)
writeFile(outputFile, config)
proc proccessGrafanaDashboard(nodes: int, inputFile: string,
outputFile: string) =
# from https://github.com/status-im/nim-beacon-chain/blob/master/tests/simulation/process_dashboard.nim
var
inputData = parseFile(inputFile)
panels = inputData["panels"].copy()
numPanels = len(panels)
gridHeight = 0
outputData = inputData
for panel in panels:
if panel["gridPos"]["x"].getInt() == 0:
gridHeight += panel["gridPos"]["h"].getInt()
outputData["panels"] = %* []
for nodeNum in 0 .. (nodes - 1):
var
nodePanels = panels.copy()
panelIndex = 0
for panel in nodePanels.mitems:
panel["title"] = %* replace(panel["title"].getStr(), "#0", "#" & $nodeNum)
panel["id"] = %* (panelIndex + (nodeNum * numPanels))
panel["gridPos"]["y"] = %* (panel["gridPos"]["y"].getInt() + (nodeNum * gridHeight))
var targets = panel["targets"]
for target in targets.mitems:
target["expr"] = %* replace(target["expr"].getStr(), "{node=\"0\"}", "{node=\"" & $nodeNum & "\"}")
outputData["panels"].add(panel)
panelIndex.inc()
outputData["uid"] = %* (outputData["uid"].getStr() & "a")
outputData["title"] = %* (outputData["title"].getStr() & " (all nodes)")
writeFile(outputFile, pretty(outputData))
when isMainModule:
let conf = WakuNetworkConf.load()
var nodes: seq[NodeInfo]
case conf.topology:
of Star:
nodes = starNetwork(conf.amount)
of FullMesh:
nodes = fullMeshNetwork(conf.amount)
of DiscoveryBased:
nodes = discoveryNetwork(conf.amount)
var staticnodes: seq[string]
for i in 0..<conf.testNodePeers:
# TODO: could also select nodes randomly
staticnodes.add(nodes[i].enode)
# light node with topic interest
nodes.add(initNodeCmd(LightNode, 0, staticnodes, topicInterest = true,
label = "light node topic interest"))
# Regular light node
nodes.add(initNodeCmd(LightNode, 1, staticnodes, label = "light node"))
var commandStr = "multitail -s 2 -M 0 -x \"Waku Simulation\""
var count = 0
var sleepDuration = 0
for node in nodes:
if conf.topology in {Star, DiscoveryBased}:
sleepDuration = if node.master: 0
else: 1
commandStr &= &" -cT ansi -t 'node #{count} {node.label}' -l 'sleep {sleepDuration}; {node.cmd}; echo [node execution completed]; while true; do sleep 100; done'"
if conf.topology == FullMesh:
sleepDuration += 1
count += 1
generatePrometheusConfig(nodes, metricsDir / "prometheus" / "prometheus.yml")
proccessGrafanaDashboard(nodes.len,
metricsDir / "waku-grafana-dashboard.json",
metricsDir / "waku-sim-all-nodes-grafana-dashboard.json")
let errorCode = execCmd(commandStr)
if errorCode != 0:
error "launch command failed", command=commandStr

View File

@ -0,0 +1,16 @@
import
chronos,
eth/[p2p, async_utils], eth/p2p/peer_pool
proc setBootNodes*(nodes: openArray[string]): seq[ENode] =
result = newSeqOfCap[ENode](nodes.len)
for nodeId in nodes:
# TODO: something more user friendly than an expect
result.add(ENode.fromString(nodeId).expect("correct node"))
proc connectToNodes*(node: EthereumNode, nodes: openArray[string]) =
for nodeId in nodes:
# TODO: something more user friendly than an assert
let whisperENode = ENode.fromString(nodeId).expect("correct node")
traceAsyncErrors node.peerPool.connectToNode(newNode(whisperENode))

133
waku/v1/node/wakunode1.nim Normal file
View File

@ -0,0 +1,133 @@
import
confutils, chronos, json_rpc/rpcserver, metrics, metrics/chronicles_support,
stew/shims/net as stewNet,
eth/[keys, p2p], eth/common/utils,
eth/p2p/[discovery, enode, peer_pool, bootnodes, whispernodes],
eth/p2p/rlpx_protocols/whisper_protocol,
../protocol/[waku_protocol, waku_bridge],
../../common/utils/nat,
./rpc/[waku, wakusim, key_storage], ./waku_helpers, ./config
const clientId = "Nimbus waku node"
proc run(config: WakuNodeConf, rng: ref BrHmacDrbgContext) =
let
(ipExt, tcpPortExt, udpPortExt) = setupNat(config.nat, clientId,
Port(config.tcpPort + config.portsShift),
Port(config.udpPort + config.portsShift))
# TODO: EthereumNode should have a better split of binding address and
# external address. Also, can't have different ports as it stands now.
address = if ipExt.isNone():
Address(ip: parseIpAddress("0.0.0.0"),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
else:
Address(ip: ipExt.get(),
tcpPort: Port(config.tcpPort + config.portsShift),
udpPort: Port(config.udpPort + config.portsShift))
# Set-up node
var node = newEthereumNode(config.nodekey, address, 1, nil, clientId,
addAllCapabilities = false, rng = rng)
if not config.bootnodeOnly:
node.addCapability Waku # Always enable Waku protocol
var topicInterest: Option[seq[waku_protocol.Topic]]
var bloom: Option[Bloom]
if config.wakuTopicInterest:
var topics: seq[waku_protocol.Topic]
topicInterest = some(topics)
else:
bloom = some(fullBloom())
let wakuConfig = WakuConfig(powRequirement: config.wakuPow,
bloom: bloom,
isLightNode: config.lightNode,
maxMsgSize: waku_protocol.defaultMaxMsgSize,
topics: topicInterest)
node.configureWaku(wakuConfig)
if config.whisper or config.whisperBridge:
node.addCapability Whisper
node.protocolState(Whisper).config.powRequirement = 0.002
if config.whisperBridge:
node.shareMessageQueue()
# TODO: Status fleet bootnodes are discv5? That will not work.
let bootnodes = if config.bootnodes.len > 0: setBootNodes(config.bootnodes)
elif config.fleet == prod: setBootNodes(StatusBootNodes)
elif config.fleet == staging: setBootNodes(StatusBootNodesStaging)
elif config.fleet == test: setBootNodes(StatusBootNodesTest)
else: @[]
let connectedFut = node.connectToNetwork(bootnodes, not config.noListen,
config.discovery)
connectedFut.callback = proc(data: pointer) {.gcsafe.} =
{.gcsafe.}:
if connectedFut.failed:
fatal "connectToNetwork failed", msg = connectedFut.readError.msg
quit(1)
if not config.bootnodeOnly:
# Optionally direct connect with a set of nodes
if config.staticnodes.len > 0: connectToNodes(node, config.staticnodes)
elif config.fleet == prod: connectToNodes(node, WhisperNodes)
elif config.fleet == staging: connectToNodes(node, WhisperNodesStaging)
elif config.fleet == test: connectToNodes(node, WhisperNodesTest)
if config.rpc:
let ta = initTAddress(config.rpcAddress,
Port(config.rpcPort + config.portsShift))
var rpcServer = newRpcHttpServer([ta])
let keys = newKeyStorage()
setupWakuRPC(node, keys, rpcServer, rng)
setupWakuSimRPC(node, rpcServer)
rpcServer.start()
if config.logAccounting:
proc logPeerAccounting(udata: pointer) {.closure, gcsafe.} =
{.gcsafe.}:
for peer in node.peerPool.peers:
let
sent = peer.state(Waku).accounting.sent
received = peer.state(Waku).accounting.received
id = peer.network.toEnode
info "Peer accounting", id, sent, received
peer.state(Waku).accounting = Accounting(sent: 0, received: 0)
discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting)
discard setTimer(Moment.fromNow(2.seconds), logPeerAccounting)
when defined(insecure):
if config.metricsServer:
let
address = config.metricsServerAddress
port = config.metricsServerPort + config.portsShift
info "Starting metrics HTTP server", address, port
metrics.startHttpServer($address, Port(port))
if config.logMetrics:
proc logMetrics(udata: pointer) {.closure, gcsafe.} =
{.gcsafe.}:
let
connectedPeers = connected_peers
validEnvelopes = waku_protocol.envelopes_valid
droppedEnvelopes = waku_protocol.envelopes_dropped
info "Node metrics", connectedPeers, validEnvelopes, droppedEnvelopes
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
runForever()
when isMainModule:
let
rng = keys.newRng()
conf = WakuNodeConf.load()
if conf.logLevel != LogLevel.NONE:
setLogLevel(conf.logLevel)
case conf.cmd
of genNodekey:
echo PrivateKey.random(rng[])
of noCommand:
run(conf, rng)

View File

@ -0,0 +1,3 @@
# Waku v1 protocol
This folder contains implementations of [Waku v1 protocols](https://specs.vac.dev/specs/waku/v1/waku-1.html).

View File

@ -0,0 +1,17 @@
#
# Waku - Whisper Bridge
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
import
eth/p2p,
eth/p2p/rlpx_protocols/whisper_protocol,
./waku_protocol
proc shareMessageQueue*(node: EthereumNode) =
node.protocolState(Waku).queue = node.protocolState(Whisper).queue

View File

@ -0,0 +1,85 @@
#
# Waku Mail Client & Server
# (c) Copyright 2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
import
chronos,
eth/[p2p, async_utils],
./waku_protocol
const
requestCompleteTimeout = chronos.seconds(5)
type
Cursor = seq[byte]
MailRequest* = object
lower*: uint32 ## Unix timestamp; oldest requested envelope's creation time
upper*: uint32 ## Unix timestamp; newest requested envelope's creation time
bloom*: seq[byte] ## Bloom filter to apply on the envelopes
limit*: uint32 ## Maximum amount of envelopes to return
cursor*: Cursor ## Optional cursor
proc requestMail*(node: EthereumNode, peerId: NodeId, request: MailRequest,
symKey: SymKey, requests = 10): Future[Option[Cursor]] {.async.} =
## Send a p2p mail request and check for request completion.
## If the result is none, an error occurred. If the result is a non-empty
## cursor, more envelopes are available.
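## A rough usage sketch (the target peer must first be marked trusted via
## ``setPeerTrusted``; ``mailServerId`` and ``symKey`` are placeholders):
##
## .. code-block::nim
##   let request = MailRequest(lower: 0, upper: high(uint32),
##     bloom: @[], limit: 100)
##   let cursor = await node.requestMail(mailServerId, request, symKey)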
# TODO: Perhaps don't go the recursive route or could use the actual response
# proc to implement this (via a handler) and store the necessary data in the
# WakuPeer object.
# TODO: Several requestMail calls in parallel can create issues with handling
# the wrong response to a request. Additionally checking the requestId would
# only be a partial solution. Better to use the requestResponse mechanism.
# TODO: move this check out of requestMail?
let peer = node.getPeer(peerId, Waku)
if not peer.isSome():
error "Invalid peer"
return result
elif not peer.get().state(Waku).trusted:
return result
var writer = initRlpWriter()
writer.append(request)
let payload = writer.finish()
let data = encode(node.rng[], Payload(payload: payload, symKey: some(symKey)))
if not data.isSome():
error "Encoding of payload failed"
return result
# TODO: should this envelope be valid in terms of ttl, PoW, etc.?
let env = Envelope(expiry: 0, ttl: 0, data: data.get(), nonce: 0)
# Send the request
traceAsyncErrors peer.get().p2pRequest(env)
# Wait for the Request Complete packet
var f = peer.get().nextMsg(Waku.p2pRequestComplete)
if await f.withTimeout(requestCompleteTimeout):
let response = f.read()
# TODO: I guess the idea is to check requestId (Hash) also?
let requests = requests - 1
# If there is cursor data, do another request
if response.cursor.len > 0 and requests > 0:
var newRequest = request
newRequest.cursor = response.cursor
return await requestMail(node, peerId, newRequest, symKey, requests)
else:
return some(response.cursor)
else:
error "p2pRequestComplete timeout"
return result
proc p2pRequestHandler(peer: Peer, envelope: Envelope) =
# Mail server p2p request implementation
discard
proc enableMailServer*(node: EthereumNode) =
# TODO: This could become part of an init call for an actual `MailServer`
# object.
node.registerP2PRequestHandler(p2pRequestHandler)

View File

@ -0,0 +1,652 @@
#
# Waku
# (c) Copyright 2018-2019
# Status Research & Development GmbH
#
# Licensed under either of
# Apache License, version 2.0, (LICENSE-APACHEv2)
# MIT license (LICENSE-MIT)
#
## Waku
## *******
##
## Waku is a fork of Whisper.
##
## Waku is a gossip protocol that synchronizes a set of messages across nodes
## with attention given to sender and recipient anonymity. Messages are
## categorized by a topic and stay alive in the network based on a time-to-live
## measured in seconds. Spam prevention is based on proof-of-work, where large
## or long-lived messages must spend more work.
##
## Implementation should be according to Waku specification defined here:
## https://github.com/vacp2p/specs/blob/master/waku/waku.md
##
## Example usage
## ----------
## First an `EthereumNode` needs to be created, either with all capabilities set
## or with specifically the Waku capability set.
## The latter can be done like this:
##
## .. code-block::nim
## var node = newEthereumNode(keypair, address, netId, nil,
## addAllCapabilities = false)
## node.addCapability Waku
##
## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done.
## However, they only make real sense after ``connectToNetwork`` has been started,
## as otherwise there will be no peers to send and receive messages from.
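##
## As a rough sketch (key handling and filters are up to the caller), posting
## a plain message with a 4-byte topic and a 10 second time-to-live could
## look like:
##
## .. code-block::nim
##   discard node.postMessage(ttl = 10, topic = [byte 0, 0, 0, 1],
##     payload = @[byte 1, 2, 3])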
import
options, tables, times, chronos, chronicles, metrics,
eth/[keys, async_utils, p2p], eth/p2p/rlpx_protocols/whisper/whisper_types,
eth/trie/trie_defs
export
whisper_types
logScope:
topics = "waku"
const
defaultQueueCapacity = 2048
wakuVersion* = 1 ## Waku version.
wakuVersionStr* = $wakuVersion ## Waku version as a string.
defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node.
defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max
## message size. This can never be larger than the maximum RLPx message size.
messageInterval* = chronos.milliseconds(300) ## Interval at which messages are
## sent to peers, in ms.
pruneInterval* = chronos.milliseconds(1000) ## Interval at which the message
## queue is pruned, in ms.
topicInterestMax = 10000
type
WakuConfig* = object
powRequirement*: float64
bloom*: Option[Bloom]
isLightNode*: bool
maxMsgSize*: uint32
confirmationsEnabled*: bool
rateLimits*: Option[RateLimits]
topics*: Option[seq[Topic]]
Accounting* = ref object
sent*: uint
received*: uint
WakuPeer = ref object
initialized: bool # when successfully completed the handshake
powRequirement*: float64
bloom*: Bloom
isLightNode*: bool
trusted*: bool
topics*: Option[seq[Topic]]
received: HashSet[Hash]
accounting*: Accounting
P2PRequestHandler* = proc(peer: Peer, envelope: Envelope) {.gcsafe.}
WakuNetwork = ref object
queue*: ref Queue
filters*: Filters
config*: WakuConfig
p2pRequestHandler*: P2PRequestHandler
RateLimits* = object
# TODO: uint or specifically uint32?
limitIp*: uint
limitPeerId*: uint
limitTopic*: uint
StatusOptions* = object
powRequirement*: Option[(float64)]
bloomFilter*: Option[Bloom]
lightNode*: Option[bool]
confirmationsEnabled*: Option[bool]
rateLimits*: Option[RateLimits]
topicInterest*: Option[seq[Topic]]
KeyKind* = enum
powRequirementKey,
bloomFilterKey,
lightNodeKey,
confirmationsEnabledKey,
rateLimitsKey,
topicInterestKey
template countSomeFields*(x: StatusOptions): int =
var count = 0
for f in fields(x):
if f.isSome():
inc count
count
proc append*(rlpWriter: var RlpWriter, value: StatusOptions) =
var list = initRlpList(countSomeFields(value))
if value.powRequirement.isSome():
list.append((powRequirementKey, cast[uint64](value.powRequirement.get())))
if value.bloomFilter.isSome():
list.append((bloomFilterKey, @(value.bloomFilter.get())))
if value.lightNode.isSome():
list.append((lightNodeKey, value.lightNode.get()))
if value.confirmationsEnabled.isSome():
list.append((confirmationsEnabledKey, value.confirmationsEnabled.get()))
if value.rateLimits.isSome():
list.append((rateLimitsKey, value.rateLimits.get()))
if value.topicInterest.isSome():
list.append((topicInterestKey, value.topicInterest.get()))
let bytes = list.finish()
rlpWriter.append(rlpFromBytes(bytes))
proc read*(rlp: var Rlp, T: typedesc[StatusOptions]): T =
if not rlp.isList():
raise newException(RlpTypeMismatch,
"List expected, but the source RLP is not a list.")
let sz = rlp.listLen()
# We already know that we are working with a list
doAssert rlp.enterList()
for i in 0 ..< sz:
rlp.tryEnterList()
var k: KeyKind
try:
k = rlp.read(KeyKind)
except RlpTypeMismatch:
# skip unknown keys and their value
rlp.skipElem()
rlp.skipElem()
continue
case k
of powRequirementKey:
let pow = rlp.read(uint64)
result.powRequirement = some(cast[float64](pow))
of bloomFilterKey:
let bloom = rlp.read(seq[byte])
if bloom.len != bloomSize:
raise newException(UselessPeerError, "Bloomfilter size mismatch")
var bloomFilter: Bloom
bloomFilter.bytesCopy(bloom)
result.bloomFilter = some(bloomFilter)
of lightNodeKey:
result.lightNode = some(rlp.read(bool))
of confirmationsEnabledKey:
result.confirmationsEnabled = some(rlp.read(bool))
of rateLimitsKey:
result.rateLimits = some(rlp.read(RateLimits))
of topicInterestKey:
result.topicInterest = some(rlp.read(seq[Topic]))
proc allowed*(msg: Message, config: WakuConfig): bool =
# Check max msg size, already happens in RLPx but there is a specific waku
# max msg size which should always be < RLPx max msg size
if msg.size > config.maxMsgSize:
envelopes_dropped.inc(labelValues = ["too_large"])
warn "Message size too large", size = msg.size
return false
if msg.pow < config.powRequirement:
envelopes_dropped.inc(labelValues = ["low_pow"])
warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement
return false
if config.topics.isSome():
if msg.env.topic notin config.topics.get():
envelopes_dropped.inc(labelValues = ["topic_mismatch"])
warn "Message topic does not match Waku topic list"
return false
else:
if config.bloom.isSome() and not bloomFilterMatch(config.bloom.get(), msg.bloom):
envelopes_dropped.inc(labelValues = ["bloom_filter_mismatch"])
warn "Message does not match node bloom filter"
return false
return true
proc run(peer: Peer) {.gcsafe, async.}
proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.}
proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} =
new(network.queue)
network.queue[] = initQueue(defaultQueueCapacity)
network.filters = initTable[string, Filter]()
network.config.bloom = some(fullBloom())
network.config.powRequirement = defaultMinPow
network.config.isLightNode = false
# RateLimits and confirmations are not yet implemented so we set confirmations
# to false and we don't pass RateLimits at all.
network.config.confirmationsEnabled = false
network.config.rateLimits = none(RateLimits)
network.config.maxMsgSize = defaultMaxMsgSize
network.config.topics = none(seq[Topic])
asyncCheck node.run(network)
p2pProtocol Waku(version = wakuVersion,
rlpxName = "waku",
peerState = WakuPeer,
networkState = WakuNetwork):
onPeerConnected do (peer: Peer):
trace "onPeerConnected Waku"
let
wakuNet = peer.networkState
wakuPeer = peer.state
let options = StatusOptions(
powRequirement: some(wakuNet.config.powRequirement),
bloomFilter: wakuNet.config.bloom,
lightNode: some(wakuNet.config.isLightNode),
confirmationsEnabled: some(wakuNet.config.confirmationsEnabled),
rateLimits: wakuNet.config.rateLimits,
topicInterest: wakuNet.config.topics)
let m = await peer.status(options,
timeout = chronos.milliseconds(5000))
wakuPeer.powRequirement = m.options.powRequirement.get(defaultMinPow)
wakuPeer.bloom = m.options.bloomFilter.get(fullBloom())
wakuPeer.isLightNode = m.options.lightNode.get(false)
if wakuPeer.isLightNode and wakuNet.config.isLightNode:
# No sense in connecting two light nodes so we disconnect
raise newException(UselessPeerError, "Two light nodes connected")
wakuPeer.topics = m.options.topicInterest
if wakuPeer.topics.isSome():
if wakuPeer.topics.get().len > topicInterestMax:
raise newException(UselessPeerError, "Topic-interest is too large")
if wakuNet.config.topics.isSome():
raise newException(UselessPeerError,
"Two Waku nodes with topic-interest connected")
wakuPeer.received.init()
wakuPeer.trusted = false
wakuPeer.accounting = Accounting(sent: 0, received: 0)
wakuPeer.initialized = true
# No timer based queue processing for a light node.
if not wakuNet.config.isLightNode:
traceAsyncErrors peer.run()
debug "Waku peer initialized", peer
handshake:
proc status(peer: Peer, options: StatusOptions)
proc messages(peer: Peer, envelopes: openarray[Envelope]) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding messages"
return
for envelope in envelopes:
# check if expired or in future, or ttl not 0
if not envelope.valid():
warn "Expired or future timed envelope", peer
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
peer.state.accounting.received += 1
let msg = initMessage(envelope)
if not msg.allowed(peer.networkState.config):
# disconnect from peers sending bad envelopes
# await peer.disconnect(SubprotocolReason)
continue
# This peer sent this message, thus it should not receive it again.
# If this peer has the message in the `received` set already, this means
# it was either already received here from this peer or sent to this peer.
# Either way it will be in our queue already (and the peer should know
# this) and this peer is sending duplicates.
# Note: geth does not check if a peer has sent a message to them before
# broadcasting this message. This too is seen here as a duplicate message
# (see above comment). If we want to separate these cases (e.g. when peer
# rating), then we have to add a "peer.state.send" HashSet.
# Note: it could also be a race between the arrival of a message sent by
# this node to a peer and that same message arriving from that peer (after
# it was received from another peer) here.
if peer.state.received.containsOrIncl(msg.hash):
envelopes_dropped.inc(labelValues = ["duplicate"])
trace "Peer sending duplicate messages", peer, hash = $msg.hash
# await peer.disconnect(SubprotocolReason)
continue
# This can still be a duplicate message, but from another peer than
# the peer who sent the message.
if peer.networkState.queue[].add(msg):
# notify filters of this message
peer.networkState.filters.notify(msg)
nextID 22
proc statusOptions(peer: Peer, options: StatusOptions) =
if not peer.state.initialized:
warn "Handshake not completed yet, discarding statusOptions"
return
if options.topicInterest.isSome():
peer.state.topics = options.topicInterest
elif options.bloomFilter.isSome():
peer.state.bloom = options.bloomFilter.get()
peer.state.topics = none(seq[Topic])
if options.powRequirement.isSome():
peer.state.powRequirement = options.powRequirement.get()
if options.lightNode.isSome():
peer.state.isLightNode = options.lightNode.get()
nextID 126
proc p2pRequest(peer: Peer, envelope: Envelope) =
if not peer.networkState.p2pRequestHandler.isNil():
peer.networkState.p2pRequestHandler(peer, envelope)
proc p2pMessage(peer: Peer, envelopes: openarray[Envelope]) =
if peer.state.trusted:
# when trusted we can bypass any checks on envelope
for envelope in envelopes:
let msg = Message(env: envelope, isP2P: true)
peer.networkState.filters.notify(msg)
# The following message IDs are not part of EIP-627, but are added and used
# by the Status application; we ignore them for now.
nextID 11
proc batchAcknowledged(peer: Peer) = discard
proc messageResponse(peer: Peer) = discard
nextID 123
requestResponse:
proc p2pSyncRequest(peer: Peer) = discard
proc p2pSyncResponse(peer: Peer) = discard
proc p2pRequestComplete(peer: Peer, requestId: Hash, lastEnvelopeHash: Hash,
cursor: seq[byte]) = discard
# TODO:
# In the current specification the parameters are not wrapped in a regular
# envelope as is done for the P2P Request packet. If we could alter this in
# the spec it would be a cleaner separation between Waku and Mail server /
# client.
# Also, if a requestResponse block is used, a requestId will automatically
# be added by the protocol DSL.
# However the requestResponse block in combination with p2pRequest cannot be
# used due to the unfortunate fact that the packet IDs are not consecutive,
# and nextID is not recognized in between these. The nextID behaviour could
# be fixed, however it would be cleaner if the specification could be
# changed to have these IDs to be consecutive.
# 'Runner' calls ---------------------------------------------------------------
proc processQueue(peer: Peer) =
# Send to the peer all valid envelopes in the queue that were not previously sent.
var
envelopes: seq[Envelope] = @[]
wakuPeer = peer.state(Waku)
wakuNet = peer.networkState(Waku)
for message in wakuNet.queue.items:
if wakuPeer.received.contains(message.hash):
# trace "message was already send to peer", hash = $message.hash, peer
continue
if message.pow < wakuPeer.powRequirement:
trace "Message PoW too low for peer", pow = message.pow,
powReq = wakuPeer.powRequirement
continue
if wakuPeer.topics.isSome():
if message.env.topic notin wakuPeer.topics.get():
trace "Message does not match topics list"
continue
else:
if not bloomFilterMatch(wakuPeer.bloom, message.bloom):
trace "Message does not match peer bloom filter"
continue
trace "Adding envelope"
envelopes.add(message.env)
wakuPeer.accounting.sent += 1
wakuPeer.received.incl(message.hash)
if envelopes.len() > 0:
trace "Sending envelopes", amount=envelopes.len
# Ignore failure of sending messages, this could occur when the connection
# gets dropped
traceAsyncErrors peer.messages(envelopes)
proc run(peer: Peer) {.async.} =
while peer.connectionState notin {Disconnecting, Disconnected}:
peer.processQueue()
await sleepAsync(messageInterval)
proc pruneReceived(node: EthereumNode) {.raises: [].} =
if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ...
var wakuNet = node.protocolState(Waku)
for peer in node.protocolPeers(Waku):
if not peer.initialized:
continue
# NOTE: Perhaps alter the queue prune call to keep track of a HashSet
# of pruned messages (as these should be smaller), and diff this with
# the received sets.
peer.received = intersection(peer.received, wakuNet.queue.itemHashes)
proc run(node: EthereumNode, network: WakuNetwork) {.async.} =
while true:
# prune message queue every second
# TTL unit is in seconds, so this should be sufficient?
network.queue[].prune()
# pruning the received sets is not necessary for correct operation,
# but simply keeps the sets from growing indefinitely
node.pruneReceived()
await sleepAsync(pruneInterval)
# Private EthereumNode calls ---------------------------------------------------
proc sendP2PMessage(node: EthereumNode, peerId: NodeId,
envelopes: openarray[Envelope]): bool =
for peer in node.peers(Waku):
if peer.remote.id == peerId:
asyncCheck peer.p2pMessage(envelopes)
return true
proc queueMessage(node: EthereumNode, msg: Message): bool =
var wakuNet = node.protocolState(Waku)
# We have to do the same checks here as in the messages proc not to leak
# any information that the message originates from this node.
if not msg.allowed(wakuNet.config):
return false
trace "Adding message to queue", hash = $msg.hash
if wakuNet.queue[].add(msg):
# Also notify our own filters of the message we are sending,
# e.g. msg from local Dapp to Dapp
wakuNet.filters.notify(msg)
return true
# Public EthereumNode calls ----------------------------------------------------
proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](),
symKey = none[SymKey](), src = none[PrivateKey](),
ttl: uint32, topic: Topic, payload: seq[byte],
padding = none[seq[byte]](), powTime = 1'f,
powTarget = defaultMinPow,
targetPeer = none[NodeId]()): bool =
## Post a message on the message queue which will be processed at the
## next `messageInterval`.
##
## NOTE: This call allows a post without encryption. If encryption is
## mandatory it should be enforced a layer up
let payload = encode(node.rng[], Payload(
payload: payload, src: src, dst: pubKey, symKey: symKey, padding: padding))
if payload.isSome():
var env = Envelope(expiry:epochTime().uint32 + ttl,
ttl: ttl, topic: topic, data: payload.get(), nonce: 0)
# Allow lightnode to post only direct p2p messages
if targetPeer.isSome():
return node.sendP2PMessage(targetPeer.get(), [env])
else:
# a non-direct p2p message cannot have a ttl of 0
if env.ttl == 0:
return false
var msg = initMessage(env, powCalc = false)
# XXX: make this non blocking or not?
# In its current blocking state, it could be noticed by a peer that no
# messages are sent for a while, and thus that mining PoW is done, and
# that the next messages contain a message originating from this peer
# zah: It would be hard to execute this in a background thread at the
# moment. We'll need a way to send custom "tasks" to the async message
# loop (e.g. AD2 support for AsyncChannels).
if not msg.sealEnvelope(powTime, powTarget):
return false
# need to check expiry after mining PoW
if not msg.env.valid():
return false
result = node.queueMessage(msg)
# Allows light nodes to post via the untrusted messages packet.
# Queue gets processed immediately as the node sends only its own messages,
# so the privacy ship has already sailed anyhow.
# TODO:
# - Could still be a concern in terms of efficiency, if multiple messages
#   need to be sent.
# - For Waku Mode, the checks in processQueue are rather useless as the
# idea is to connect only to 1 node? Also refactor in that case.
if node.protocolState(Waku).config.isLightNode:
for peer in node.peers(Waku):
peer.processQueue()
else:
error "Encoding of payload failed"
return false
proc subscribeFilter*(node: EthereumNode, filter: Filter,
handler:FilterMsgHandler = nil): string =
## Initiate a filter for incoming/outgoing messages. Messages can be
## retrieved with the `getFilterMessages` call or with a provided
## `FilterMsgHandler`.
##
## NOTE: This call allows for a filter without decryption. If encryption is
## mandatory it should be enforced a layer up.
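##
## A minimal sketch of both variants; ``myFilter`` is assumed to be a
## previously constructed ``Filter`` for the topics of interest:
##
## .. code-block::nim
##   # callback variant: the handler runs for every matching message
##   proc msgHandler(msg: ReceivedMessage) {.gcsafe, closure.} =
##     echo "received message with PoW ", msg.pow
##   let filterId = node.subscribeFilter(myFilter, msgHandler)
##
##   # polling variant: subscribe without a handler and poll later
##   let pollingId = node.subscribeFilter(myFilter)
##   for msg in node.getFilterMessages(pollingId):
##     echo msg.decoded.payload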
return subscribeFilter(
node.rng[], node.protocolState(Waku).filters, filter, handler)
proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool =
## Remove a previously subscribed filter.
var filter: Filter
return node.protocolState(Waku).filters.take(filterId, filter)
proc getFilterMessages*(node: EthereumNode, filterId: string): seq[ReceivedMessage] =
## Get all the messages currently in the filter queue. This will reset the
## filter message queue.
return node.protocolState(Waku).filters.getFilterMessages(filterId)
proc filtersToBloom*(node: EthereumNode): Bloom =
## Returns the bloom filter of all topics of all subscribed filters.
return node.protocolState(Waku).filters.toBloom()
proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} =
## Sets the PoW requirement for this node, will also send
## this new PoW requirement to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old PoW for some time?
node.protocolState(Waku).config.powRequirement = powReq
var futures: seq[Future[void]] = @[]
let list = StatusOptions(powRequirement: some(powReq))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} =
## Sets the bloom filter for this node, will also send
## this new bloom filter to all connected peers.
##
## Failures when sending messages to peers will not be reported.
# NOTE: do we need a tolerance of old bloom filter for some time?
node.protocolState(Waku).config.bloom = some(bloom)
# reset topics
node.protocolState(Waku).config.topics = none(seq[Topic])
var futures: seq[Future[void]] = @[]
let list = StatusOptions(bloomFilter: some(bloom))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc setTopicInterest*(node: EthereumNode, topics: seq[Topic]):
Future[bool] {.async.} =
if topics.len > topicInterestMax:
return false
node.protocolState(Waku).config.topics = some(topics)
var futures: seq[Future[void]] = @[]
let list = StatusOptions(topicInterest: some(topics))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
return true
proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool =
## Set the maximum allowed message size.
## Cannot be set higher than ``defaultMaxMsgSize``.
if size > defaultMaxMsgSize:
warn "size > defaultMaxMsgSize"
return false
node.protocolState(Waku).config.maxMsgSize = size
return true
proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool =
## Set a connected peer as trusted.
for peer in node.peers(Waku):
if peer.remote.id == peerId:
peer.state(Waku).trusted = true
return true
proc setLightNode*(node: EthereumNode, isLightNode: bool) {.async.} =
## Set this node as a Waku light node.
node.protocolState(Waku).config.isLightNode = isLightNode
# TODO: Add starting/stopping of `processQueue` loop depending on value of isLightNode.
var futures: seq[Future[void]] = @[]
let list = StatusOptions(lightNode: some(isLightNode))
for peer in node.peers(Waku):
futures.add(peer.statusOptions(list))
# Exceptions from sendMsg will not be raised
await allFutures(futures)
proc configureWaku*(node: EthereumNode, config: WakuConfig) =
## Apply a Waku configuration.
##
## NOTE: Should be run before connection is made with peers as some
## of the settings are only communicated at peer handshake.
node.protocolState(Waku).config = config
proc registerP2PRequestHandler*(node: EthereumNode,
customHandler: P2PRequestHandler) =
node.protocolState(Waku).p2pRequestHandler = customHandler
proc resetMessageQueue*(node: EthereumNode) =
## Full reset of the message queue.
##
## NOTE: Not something that should be run in normal circumstances.
node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity)

139
waku/v2/README.md Normal file
View File

@ -0,0 +1,139 @@
# Waku v2
This folder contains code related to Waku v2, both as a node and as a protocol.
## Introduction
This is an implementation in Nim of Waku v2, which is currently in draft/beta stage.
See [spec](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
## How to Build & Run
### Prerequisites
* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer.
* PCRE
More information on the installation of these can be found [here](https://github.com/status-im/nimbus#prerequisites).
### Wakunode
```bash
# The first `make` invocation will update all Git submodules.
# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date.
make wakunode2
# See available command line options
./build/wakunode2 --help
# Connect the client directly with the Status test fleet
# TODO NYI
#./build/wakunode2 --log-level:debug --discovery:off --fleet:test --log-metrics
```
### Waku v2 Protocol Test Suite
```bash
# Run all the Waku v2 tests
make test2
```
You can also run a specific test (and alter compile options as you want):
```bash
# Get a shell with the right environment variables set
./env.sh bash
# Run a specific test
nim c -r ./tests/v2/test_waku_filter.nim
```
### Waku v2 Protocol Example
There is a more basic example, more limited in features and configuration than
the `wakunode2`, located in `examples/v2/basic2.nim`.
There is also a more fully featured example in `examples/v2/chat2.nim`.
### Waku Quick Simulation
*NOTE: This section might be slightly out of date as it was written for Waku v1.*
One can set up several nodes, get them connected and then instruct them via the
JSON-RPC interface. This can be done via e.g. web3.js, nim-web3 (needs to be
updated) or simply curl your way out.
The JSON-RPC interface is currently the same as the one of Whisper. The only
difference is the addition of broadcasting the topics interest when a filter
with a certain set of topics is subscribed.
The quick simulation uses this approach, `start_network` launches a set of
`wakunode`s, and `quicksim` instructs the nodes through RPC calls.
Example of how to build and run:
```bash
# Build wakunode + quicksim with metrics enabled
make NIMFLAGS="-d:insecure" wakusim2
# Start the simulation nodes, this currently requires multitail to be installed
# TODO Partial support for Waku v2
./build/start_network2 --topology:FullMesh --amount:6 --test-node-peers:2
# In another shell run
./build/quicksim2
```
The `start_network2` tool will also provide a `prometheus.yml` with targets
set to all simulation nodes that are started. This way you can easily start
prometheus with this config, e.g.:
```bash
cd ./metrics/prometheus
prometheus
```
A Grafana dashboard containing the example dashboard for each simulation node
is also generated and can be imported in case you have Grafana running.
This dashboard can be found at `./metrics/waku-sim-all-nodes-grafana-dashboard.json`
To read more details about metrics, see the [next](#using-metrics) section.
## Using Metrics
Metrics are available for valid envelopes and dropped envelopes.
To compile in an HTTP endpoint for accessing the metrics we need to provide the
`insecure` flag:
```bash
make NIMFLAGS="-d:insecure" wakunode2
./build/wakunode2 --metrics-server
```
Ensure your Prometheus config `prometheus.yml` contains the targets you care about, e.g.:
```
scrape_configs:
- job_name: "waku"
static_configs:
- targets: ['localhost:8008', 'localhost:8009', 'localhost:8010']
```
For visualisation, steps similar to those documented for Nimbus
[here](https://github.com/status-im/nimbus#metric-visualisation) can be used.
There is a similar example dashboard that includes visualisation of the
envelopes available at `metrics/waku-grafana-dashboard.json`.
## Spec support
*This section last updated November 16, 2020*
All Waku v2 specs, except for bridge, are currently in draft.
## Docker Image
By default, the target will be a docker image with `wakunode`, which is the Waku v1 node.
You can change this to `wakunode2`, the Waku v2 node, like this:
```bash
make docker-image MAKE_TARGET=wakunode2
docker run --rm -it statusteam/nim-waku:latest --help
```

5
waku/v2/node/README.md Normal file
View File

@ -0,0 +1,5 @@
# Waku Node v2
This folder contains code related to running a `wakunode2` process. The main entrypoint is the `wakunode2` file.
See `../../docs/api/v2/node.md` for more details on the Nim Node API.

160
waku/v2/node/config.nim Normal file
View File

@ -0,0 +1,160 @@
import
std/strutils,
confutils, confutils/defs, confutils/std/net,
chronicles, chronos,
libp2p/crypto/crypto,
libp2p/crypto/secp,
nimcrypto/utils,
eth/keys
type
WakuNodeConf* = object
logLevel* {.
desc: "Sets the log level."
defaultValue: LogLevel.INFO
name: "log-level" }: LogLevel
listenAddress* {.
defaultValue: defaultListenAddress(config)
desc: "Listening address for the LibP2P traffic."
name: "listen-address"}: ValidIpAddress
tcpPort* {.
desc: "TCP listening port."
defaultValue: 60000
name: "tcp-port" }: Port
udpPort* {.
desc: "UDP listening port."
defaultValue: 60000
name: "udp-port" }: Port
portsShift* {.
desc: "Add a shift to all port numbers."
defaultValue: 0
name: "ports-shift" }: uint16
nat* {.
desc: "Specify method to use for determining public address. " &
"Must be one of: any, none, upnp, pmp, extip:<IP>."
defaultValue: "any" }: string
staticnodes* {.
desc: "Enode URL to directly connect with. Argument may be repeated."
name: "staticnode" }: seq[string]
storenode* {.
desc: "Enode URL to query for storage.",
defaultValue: ""
name: "storenode" }: string
store* {.
desc: "Flag whether to start store protocol",
defaultValue: false
name: "store" }: bool
filter* {.
desc: "Flag whether to start filter protocol",
defaultValue: false
name: "filter" }: bool
relay* {.
desc: "Flag whether to start relay protocol",
defaultValue: true
name: "relay" }: bool
swap* {.
desc: "Flag whether to start swap protocol",
defaultValue: false
name: "swap" }: bool
filternode* {.
desc: "Enode URL to filter.",
defaultValue: ""
name: "filternode" }: string
dbpath* {.
desc: "The database path for the store protocol.",
defaultValue: ""
name: "dbpath" }: string
topics* {.
desc: "Default topics to subscribe to (space separated list)."
defaultValue: "/waku/2/default-waku/proto"
name: "topics" .}: string
# NOTE: Signature is different here, we return PrivateKey and not KeyPair
nodekey* {.
desc: "P2P node private key as hex.",
defaultValue: crypto.PrivateKey.random(Secp256k1, keys.newRng()[]).tryGet()
name: "nodekey" }: crypto.PrivateKey
rpc* {.
desc: "Enable Waku RPC server.",
defaultValue: true
name: "rpc" }: bool
rpcAddress* {.
desc: "Listening address of the RPC server.",
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "rpc-address" }: ValidIpAddress
rpcPort* {.
desc: "Listening port of the RPC server.",
defaultValue: 8545
name: "rpc-port" }: uint16
metricsServer* {.
desc: "Enable the metrics server."
defaultValue: false
name: "metrics-server" }: bool
metricsServerAddress* {.
desc: "Listening address of the metrics server."
defaultValue: ValidIpAddress.init("127.0.0.1")
name: "metrics-server-address" }: ValidIpAddress
metricsServerPort* {.
desc: "Listening HTTP port of the metrics server."
defaultValue: 8008
name: "metrics-server-port" }: uint16
logMetrics* {.
desc: "Enable metrics logging."
defaultValue: false
name: "log-metrics" }: bool
# NOTE: Keys are different in nim-libp2p
proc parseCmdArg*(T: type crypto.PrivateKey, p: TaintedString): T =
try:
let key = SkPrivateKey.init(utils.fromHex(p)).tryGet()
# XXX: Here at the moment
result = crypto.PrivateKey(scheme: Secp256k1, skkey: key)
except CatchableError as e:
raise newException(ConfigurationError, "Invalid private key")
proc completeCmdArg*(T: type crypto.PrivateKey, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type ValidIpAddress, p: TaintedString): T =
try:
result = ValidIpAddress.init(p)
except CatchableError as e:
raise newException(ConfigurationError, "Invalid IP address")
proc completeCmdArg*(T: type ValidIpAddress, val: TaintedString): seq[string] =
return @[]
proc parseCmdArg*(T: type Port, p: TaintedString): T =
try:
result = Port(parseInt(p))
except CatchableError as e:
raise newException(ConfigurationError, "Invalid Port number")
proc completeCmdArg*(T: type Port, val: TaintedString): seq[string] =
return @[]
func defaultListenAddress*(conf: WakuNodeConf): ValidIpAddress =
# TODO: How should we select between IPv4 and IPv6
# Maybe there should be a config option for this.
(static ValidIpAddress.init("0.0.0.0"))

View File

@ -0,0 +1,54 @@
{.push raises: [Exception, Defect].}
import
std/[options,sequtils],
json_rpc/rpcserver,
libp2p/[peerinfo, switch],
../../waku_types,
../../protocol/waku_store/[waku_store_types, waku_store],
../../protocol/waku_swap/[waku_swap_types, waku_swap],
../../protocol/waku_filter,
../wakunode2,
./jsonrpc_types
proc constructMultiaddrStr*(peerInfo: PeerInfo): string =
# Constructs a multiaddress with both location address and p2p identity
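# e.g. "/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAm..." (illustrative values)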
$peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
proc installAdminApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Admin API version 1 definitions
rpcsrv.rpc("get_waku_v2_admin_v1_peers") do() -> seq[WakuPeer]:
## Returns a list of peers registered for this node's mounted protocols
debug "get_waku_v2_admin_v1_peers"
# Create a single list of peers from mounted protocols.
# @TODO since the switch does not expose its connections, retrieving the connected peers requires a peer store/peer management
var wPeers: seq[WakuPeer] = @[]
if not node.wakuSwap.isNil:
# Map WakuSwap peers to WakuPeers and add to return list
wPeers.insert(node.wakuSwap.peers.mapIt(WakuPeer(multiaddr: constructMultiaddrStr(it.peerInfo),
protocol: WakuSwapCodec,
connected: node.switch.isConnected(it.peerInfo))),
wPeers.len) # Append to the end of the sequence
if not node.wakuFilter.isNil:
# Map WakuFilter peers to WakuPeers and add to return list
wPeers.insert(node.wakuFilter.peers.mapIt(WakuPeer(multiaddr: constructMultiaddrStr(it.peerInfo),
protocol: WakuFilterCodec,
connected: node.switch.isConnected(it.peerInfo))),
wPeers.len) # Append to the end of the sequence
if not node.wakuStore.isNil:
# Map WakuStore peers to WakuPeers and add to return list
wPeers.insert(node.wakuStore.peers.mapIt(WakuPeer(multiaddr: constructMultiaddrStr(it.peerInfo),
protocol: WakuStoreCodec,
connected: node.switch.isConnected(it.peerInfo))),
wPeers.len) # Append to the end of the sequence
# @TODO filter output on protocol/connected-status
return wPeers

View File

@ -0,0 +1,14 @@
import
json_rpc/rpcserver,
../../waku_types,
../wakunode2
proc installDebugApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Debug API version 1 definitions
rpcsrv.rpc("get_waku_v2_debug_v1_info") do() -> WakuInfo:
## Returns information about WakuNode
debug "get_waku_v2_debug_v1_info"
return node.info()

View File

@ -0,0 +1,94 @@
{.push raises: [Exception, Defect].}
import
std/[tables,sequtils],
json_rpc/rpcserver,
eth/[common, rlp, keys, p2p],
../../waku_types,
../wakunode2
const futTimeout* = 5.seconds # Max time to wait for futures
const maxCache* = 100 # Max number of messages cached per topic @TODO make this configurable
type
MessageCache* = Table[ContentTopic, seq[WakuMessage]]
proc installFilterApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Create a message cache indexed on content topic
## @TODO consider moving message cache elsewhere. Perhaps to node?
var
messageCache: MessageCache
proc filterHandler(msg: WakuMessage) {.gcsafe, closure.} =
# Add message to current cache
trace "WakuMessage received", msg=msg
# Make a copy of msgs for this topic to modify
var msgs = messageCache.getOrDefault(msg.contentTopic, @[])
if msgs.len >= maxCache:
# Message cache on this topic exceeds maximum. Delete oldest.
# @TODO this may become a bottleneck if called as the norm rather than the exception when adding messages. Performance profile needed.
msgs.delete(0,0)
msgs.add(msg)
# Replace indexed entry with copy
# @TODO max number of content topics could be limited in node
messageCache[msg.contentTopic] = msgs
## Filter API version 1 definitions
rpcsrv.rpc("get_waku_v2_filter_v1_messages") do(contentTopic: ContentTopic) -> seq[WakuMessage]:
## Returns all WakuMessages received on a content topic since the
## last time this method was called
## @TODO ability to specify a return message limit
debug "get_waku_v2_filter_v1_messages", contentTopic=contentTopic
if messageCache.hasKey(contentTopic):
let msgs = messageCache[contentTopic]
# Clear cache before next call
messageCache[contentTopic] = @[]
return msgs
else:
# Not subscribed to this content topic
raise newException(ValueError, "Not subscribed to content topic: " & $contentTopic)
rpcsrv.rpc("post_waku_v2_filter_v1_subscription") do(contentFilters: seq[ContentFilter], topic: Option[string]) -> bool:
## Subscribes a node to a list of content filters
debug "post_waku_v2_filter_v1_subscription"
# Construct a filter request
# @TODO use default PubSub topic if undefined
let fReq = if topic.isSome: FilterRequest(topic: topic.get, contentFilters: contentFilters, subscribe: true) else: FilterRequest(contentFilters: contentFilters, subscribe: true)
if (await node.subscribe(fReq, filterHandler).withTimeout(futTimeout)):
# Successfully subscribed to all content filters
for cTopic in concat(contentFilters.mapIt(it.topics)):
# Create message cache for each subscribed content topic
messageCache[cTopic] = @[]
return true
else:
# Failed to subscribe to one or more content filters
raise newException(ValueError, "Failed to subscribe to contentFilters " & repr(fReq))
rpcsrv.rpc("delete_waku_v2_filter_v1_subscription") do(contentFilters: seq[ContentFilter], topic: Option[string]) -> bool:
## Unsubscribes a node from a list of content filters
debug "delete_waku_v2_filter_v1_subscription"
# Construct a filter request
# @TODO consider using default PubSub topic if undefined
let fReq = if topic.isSome: FilterRequest(topic: topic.get, contentFilters: contentFilters, subscribe: false) else: FilterRequest(contentFilters: contentFilters, subscribe: false)
if (await node.unsubscribe(fReq).withTimeout(futTimeout)):
# Successfully unsubscribed from all content filters
for cTopic in concat(contentFilters.mapIt(it.topics)):
# Remove message cache for each unsubscribed content topic
messageCache.del(cTopic)
return true
else:
# Failed to unsubscribe from one or more content filters
raise newException(ValueError, "Failed to unsubscribe from contentFilters " & repr(fReq))

View File

@ -0,0 +1,24 @@
# Admin API
proc get_waku_v2_admin_v1_peers(): seq[WakuPeer]
# Debug API
proc get_waku_v2_debug_v1_info(): WakuInfo
# Relay API
proc post_waku_v2_relay_v1_message(topic: string, message: WakuRelayMessage): bool
proc get_waku_v2_relay_v1_messages(topic: string): seq[WakuMessage]
proc post_waku_v2_relay_v1_subscriptions(topics: seq[string]): bool
proc delete_waku_v2_relay_v1_subscriptions(topics: seq[string]): bool
# Store API
proc get_waku_v2_store_v1_messages(topics: seq[ContentTopic], pagingOptions: Option[StorePagingOptions]): StoreResponse
# Filter API
proc get_waku_v2_filter_v1_messages(contentTopic: ContentTopic): seq[WakuMessage]
proc post_waku_v2_filter_v1_subscription(contentFilters: seq[ContentFilter], topic: Option[string]): bool
proc delete_waku_v2_filter_v1_subscription(contentFilters: seq[ContentFilter], topic: Option[string]): bool

View File

@ -0,0 +1,23 @@
import
../../waku_types,
std/options
type
StoreResponse* = object
messages*: seq[WakuMessage]
pagingOptions*: Option[StorePagingOptions]
StorePagingOptions* = object
## This type holds some options for pagination
pageSize*: uint64
cursor*: Option[Index]
forward*: bool
WakuRelayMessage* = object
payload*: seq[byte]
contentTopic*: Option[ContentTopic]
WakuPeer* = object
multiaddr*: string
protocol*: string
connected*: bool

View File

@ -0,0 +1,31 @@
import
std/options,
../../waku_types,
../../protocol/waku_store/waku_store_types,
../wakunode2,
./jsonrpc_types
## Conversion tools
## Since the Waku v2 JSON-RPC API has its own defined types,
## we need to convert between these and the types for the Nim API
proc toPagingInfo*(pagingOptions: StorePagingOptions): PagingInfo =
PagingInfo(pageSize: pagingOptions.pageSize,
cursor: if pagingOptions.cursor.isSome: pagingOptions.cursor.get else: Index(),
direction: if pagingOptions.forward: PagingDirection.FORWARD else: PagingDirection.BACKWARD)
proc toPagingOptions*(pagingInfo: PagingInfo): StorePagingOptions =
StorePagingOptions(pageSize: pagingInfo.pageSize,
cursor: some(pagingInfo.cursor),
forward: if pagingInfo.direction == PagingDirection.FORWARD: true else: false)
proc toStoreResponse*(historyResponse: HistoryResponse): StoreResponse =
StoreResponse(messages: historyResponse.messages,
pagingOptions: if historyResponse.pagingInfo != PagingInfo(): some(historyResponse.pagingInfo.toPagingOptions()) else: none(StorePagingOptions))
proc toWakuMessage*(relayMessage: WakuRelayMessage, version: uint32): WakuMessage =
# @TODO global definition for default content topic
const defaultCT = 0
WakuMessage(payload: relayMessage.payload,
contentTopic: if relayMessage.contentTopic.isSome: relayMessage.contentTopic.get else: defaultCT,
version: version)

View File

@ -0,0 +1,118 @@
{.push raises: [Exception, Defect].}
import
std/[tables,sequtils],
json_rpc/rpcserver,
libp2p/protocols/pubsub/pubsub,
eth/[common, rlp, keys, p2p],
../../waku_types,
../wakunode2,
./jsonrpc_types, ./jsonrpc_utils
const futTimeout* = 5.seconds # Max time to wait for futures
const maxCache* = 100 # Max number of messages cached per topic @TODO make this configurable
type
TopicCache* = Table[string, seq[WakuMessage]]
proc installRelayApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
## Create a per-topic message cache
var
topicCache: TopicCache
proc topicHandler(topic: string, data: seq[byte]) {.async.} =
trace "Topic handler triggered"
let msg = WakuMessage.init(data)
if msg.isOk():
# Add message to current cache
trace "WakuMessage received", msg=msg, topic=topic
# Make a copy of msgs for this topic to modify
var msgs = topicCache.getOrDefault(topic, @[])
if msgs.len >= maxCache:
# Message cache on this topic exceeds maximum. Delete oldest.
# @TODO this may become a bottleneck if called as the norm rather than the exception when adding messages. Performance profile needed.
msgs.delete(0,0)
msgs.add(msg[])
# Replace indexed entry with copy
# @TODO max number of topics could be limited in node
topicCache[topic] = msgs
else:
debug "WakuMessage received but failed to decode", msg=msg, topic=topic
# @TODO handle message decode failure
## Relay API version 1 definitions
rpcsrv.rpc("post_waku_v2_relay_v1_message") do(topic: string, message: WakuRelayMessage) -> bool:
## Publishes a WakuMessage to a PubSub topic
debug "post_waku_v2_relay_v1_message"
if (await node.publish(topic, message.toWakuMessage(version = 0)).withTimeout(futTimeout)):
# Successfully published message
return true
else:
# Failed to publish message to topic
raise newException(ValueError, "Failed to publish to topic " & topic)
rpcsrv.rpc("get_waku_v2_relay_v1_messages") do(topic: string) -> seq[WakuMessage]:
## Returns all WakuMessages received on a PubSub topic since the
## last time this method was called
## @TODO ability to specify a return message limit
debug "get_waku_v2_relay_v1_messages", topic=topic
if topicCache.hasKey(topic):
let msgs = topicCache[topic]
# Clear cache before next call
topicCache[topic] = @[]
return msgs
else:
# Not subscribed to this topic
raise newException(ValueError, "Not subscribed to topic: " & topic)
rpcsrv.rpc("post_waku_v2_relay_v1_subscriptions") do(topics: seq[string]) -> bool:
## Subscribes a node to a list of PubSub topics
debug "post_waku_v2_relay_v1_subscriptions"
var failedTopics: seq[string]
# Subscribe to all requested topics
for topic in topics:
if not(await node.subscribe(topic, topicHandler).withTimeout(futTimeout)):
# If any topic fails to subscribe, add to list of failedTopics
failedTopics.add(topic)
else:
# Create message cache for this topic
debug "MessageCache for topic", topic=topic
topicCache[topic] = @[]
if (failedTopics.len() == 0):
# Successfully subscribed to all requested topics
return true
else:
# Failed to subscribe to one or more topics
raise newException(ValueError, "Failed to subscribe to topics " & repr(failedTopics))
rpcsrv.rpc("delete_waku_v2_relay_v1_subscriptions") do(topics: seq[string]) -> bool:
## Unsubscribes a node from a list of PubSub topics
debug "delete_waku_v2_relay_v1_subscriptions"
var failedTopics: seq[string]
# Unsubscribe all handlers from requested topics
for topic in topics:
if not(await node.unsubscribeAll(topic).withTimeout(futTimeout)):
# If any topic fails to unsubscribe, add to list of failedTopics
failedTopics.add(topic)
else:
# Remove message cache for topic
topicCache.del(topic)
if (failedTopics.len() == 0):
# Successfully unsubscribed from all requested topics
return true
else:
# Failed to unsubscribe from one or more topics
raise newException(ValueError, "Failed to unsubscribe from topics " & repr(failedTopics))

View File

@ -0,0 +1,36 @@
{.push raises: [Exception, Defect].}
import
std/options,
json_rpc/rpcserver,
../../waku_types,
../../protocol/waku_store/waku_store_types,
../wakunode2,
./jsonrpc_types, ./jsonrpc_utils
proc installStoreApiHandlers*(node: WakuNode, rpcsrv: RpcServer) =
const futTimeout = 5.seconds
## Store API version 1 definitions
rpcsrv.rpc("get_waku_v2_store_v1_messages") do(topics: seq[ContentTopic], pagingOptions: Option[StorePagingOptions]) -> StoreResponse:
## Returns history for a list of content topics with optional paging
debug "get_waku_v2_store_v1_messages"
var responseFut = newFuture[StoreResponse]()
proc queryFuncHandler(response: HistoryResponse) {.gcsafe, closure.} =
debug "get_waku_v2_store_v1_messages response"
responseFut.complete(response.toStoreResponse())
let historyQuery = HistoryQuery(topics: topics,
pagingInfo: if pagingOptions.isSome: pagingOptions.get.toPagingInfo() else: PagingInfo())
await node.query(historyQuery, queryFuncHandler)
if (await responseFut.withTimeout(futTimeout)):
# Future completed
return responseFut.read()
else:
# Future failed to complete
raise newException(ValueError, "No history response received")

View File

@ -0,0 +1,99 @@
import
sqlite3_abi,
chronos, metrics, stew/results,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
libp2p/stream/connection,
stew/results, metrics,
../waku_types,
./sqlite
# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth.
# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim
#
# Most of it is a direct copy, the only unique functions being `get` and `put`.
type
DataProc* = proc(timestamp: uint64, msg: WakuMessage) {.closure.}
proc init*(T: type MessageStore, db: SqliteDatabase): MessageStoreResult[T] =
## Creates the messages table in the database if it does not yet exist.
## Each row contains:
## - the message id (blob, primary key)
## - the receive timestamp (integer)
## - the 4-byte ContentTopic (integer)
## - the payload (blob)
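##
## A minimal initialization sketch; the database location is illustrative and
## ``SqliteDatabase`` comes from ``sqlite.nim``:
##
## .. code-block::
##   let db = SqliteDatabase.init("/tmp/waku", inMemory = true).tryGet()
##   let store = MessageStore.init(db)
##   if store.isErr:
##     echo "error"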
let prepare = db.prepareStmt("""
CREATE TABLE IF NOT EXISTS messages (
id BLOB PRIMARY KEY,
timestamp INTEGER NOT NULL,
contentTopic INTEGER NOT NULL,
payload BLOB
) WITHOUT ROWID;
""", NoParams, void)
if prepare.isErr:
return err("failed to prepare")
let res = prepare.value.exec(())
if res.isErr:
return err("failed to exec")
ok(MessageStore(database: db))
proc put*(db: MessageStore, cursor: Index, message: WakuMessage): MessageStoreResult[void] =
## Adds a message to the storage.
##
## **Example:**
##
## .. code-block::
## let res = db.put(cursor, message)
## if res.isErr:
## echo "error"
##
let prepare = db.database.prepareStmt(
"INSERT INTO messages (id, timestamp, contentTopic, payload) VALUES (?, ?, ?, ?);",
(seq[byte], int64, uint32, seq[byte]),
void
)
if prepare.isErr:
return err("failed to prepare")
let res = prepare.value.exec((@(cursor.digest.data), int64(cursor.receivedTime), message.contentTopic, message.payload))
if res.isErr:
return err("failed")
ok()
proc getAll*(db: MessageStore, onData: DataProc): MessageStoreResult[bool] =
## Retrieves all messages from the storage.
##
## **Example:**
##
## .. code-block::
## proc data(timestamp: uint64, msg: WakuMessage) =
## echo cast[string](msg.payload)
##
## let res = db.getAll(data)
## if res.isErr:
## echo "error"
var gotMessages = false
proc msg(s: ptr sqlite3_stmt) =
gotMessages = true
let
timestamp = sqlite3_column_int64(s, 0)
topic = sqlite3_column_int(s, 1)
p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, 2))
l = sqlite3_column_bytes(s, 2)
onData(uint64(timestamp), WakuMessage(contentTopic: ContentTopic(int(topic)), payload: @(toOpenArray(p, 0, l-1))))
let res = db.database.query("SELECT timestamp, contentTopic, payload FROM messages", msg)
if res.isErr:
return err("failed")
ok gotMessages
proc close*(db: MessageStore) =
## Closes the database.
db.database.close()

5
waku/v2/node/nim.cfg Normal file
View File

@ -0,0 +1,5 @@
-d:chronicles_line_numbers
-d:"chronicles_runtime_filtering=on"
-d:nimDebugDlOpen
# Results in empty output for some reason
#-d:"chronicles_enabled_topics=GossipSub:TRACE,WakuRelay:TRACE"

140
waku/v2/node/quicksim2.nim Normal file
View File

@ -0,0 +1,140 @@
import
os, strutils, chronicles, json_rpc/[rpcclient, rpcserver],
libp2p/protobuf/minprotobuf,
eth/common as eth_common, eth/keys,
options
#options as what # TODO: Huh? Redefinition?
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "rpc" / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
const defaultTopic = "/waku/2/default-waku/proto"
const topicAmount = 10 #100
proc message(i: int): ProtoBuffer =
let value = "hello " & $(i)
var result = initProtoBuffer()
result.write(initProtoField(1, value))
result.finish()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
debug "Hit handler", topic=topic, data=data
# Scenario xx1 - 16 full nodes
#########################################
let amount = 16
var nodes: seq[RpcHttpClient]
for i in 0..<amount:
var node = newRpcHttpClient()
nodes.add(node)
waitFor nodes[i].connect("localhost", Port(8547+i))
var res = waitFor nodes[i].wakuSubscribe(defaultTopic)
os.sleep(2000)
# # TODO: Show plaintext message in log
# for i in 0..<topicAmount:
# os.sleep(50)
# # TODO: This would then publish on a subtopic here
# var s = "hello " & $2
# var res3 = waitFor nodes[0].wakuPublish(defaultTopic, s)
# Scenario xx3 - same as xx1 but publish from multiple nodes
# To compare FloodSub and GossipSub factor
for i in 0..<topicAmount:
os.sleep(50)
# TODO: This would then publish on a subtopic here
var res3 = waitFor nodes[0].wakuPublish(defaultTopic, message(0).buffer)
res3 = waitFor nodes[1].wakuPublish(defaultTopic, message(1).buffer)
res3 = waitFor nodes[2].wakuPublish(defaultTopic, message(2).buffer)
res3 = waitFor nodes[3].wakuPublish(defaultTopic, message(3).buffer)
res3 = waitFor nodes[4].wakuPublish(defaultTopic, message(4).buffer)
# Scenario xx2 - 14 full nodes, two edge nodes
# Assume one full topic
#########################################
#let nodea = newRpcHttpClient()
#let nodeb = newRpcHttpClient()
#
#waitFor nodea.connect("localhost", Port(8545))
#waitFor nodeb.connect("localhost", Port(8546))
#
#let version = waitFor nodea.wakuVersion()
#info "Version is", version
#
#let res1 = waitFor nodea.wakuSubscribe(defaultTopic)
#let res2 = waitFor nodeb.wakuSubscribe(defaultTopic)
#
#let amount = 14
#var nodes: seq[RPCHttpClient]
#for i in 0..<amount:
# var node = newRpcHttpClient()
# nodes.add(node)
# waitFor nodes[i].connect("localhost", Port(8547+i))
# var res = waitFor nodes[i].wakuSubscribe(defaultTopic)
#
#os.sleep(2000)
#
## TODO: Show plaintext message in log
#for i in 0..<topicAmount:
# os.sleep(50)
# # TODO: This would then publish on a subtopic here
# var s = "hello " & $2
# var res3 = waitFor nodea.wakuPublish(defaultTopic, s)
# Misc old scenarios
#########################################
# All full nodes connected etc
#
# let node1 = newRpcHttpClient()
# let node2 = newRpcHttpClient()
# let node3 = newRpcHttpClient()
# let node4 = newRpcHttpClient()
# let node5 = newRpcHttpClient()
# let node6 = newRpcHttpClient()
# waitFor node1.connect("localhost", Port(8547))
# waitFor node2.connect("localhost", Port(8548))
# waitFor node3.connect("localhost", Port(8549))
# waitFor node4.connect("localhost", Port(8550))
# waitFor node5.connect("localhost", Port(8551))
# waitFor node6.connect("localhost", Port(8552))
# let version = waitFor node6.wakuVersion()
# info "Version is", version
# # TODO: Implement handler logic
# # All subscribing to foobar topic
# let res2 = waitFor node2.wakuSubscribe("foobar")
# let res3 = waitFor node3.wakuSubscribe("foobar")
# let res4 = waitFor node4.wakuSubscribe("foobar")
# let res5 = waitFor node5.wakuSubscribe("foobar")
# let res6 = waitFor node6.wakuSubscribe("foobar")
# os.sleep(2000)
# # info "Posting envelopes on all subscribed topics"
# for i in 0..<topicAmount:
# os.sleep(50)
# let res2 = waitFor node1.wakuPublish("foobar", "hello world")
# os.sleep(2000)
# for i in 0..<topicAmount:
# os.sleep(50)
# let res2 = waitFor node1.wakuPublish("foobar", "hello world2")
# Node 00 and 05 also subscribe
# XXX I confirm this works. As in - with this we have A-B
# Now to tweak it!
# let node0 = newRpcHttpClient()
# let node5 = newRpcHttpClient()
# waitFor node0.connect("localhost", Port(8547))
# waitFor node5.connect("localhost", Port(8552))
# let res4 = waitFor node0.wakuSubscribe("foobar")
# let res5 = waitFor node5.wakuSubscribe("foobar")

View File

@ -0,0 +1,19 @@
import
os, strutils, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
libp2p/protobuf/minprotobuf,
libp2p/[peerinfo, multiaddress],
eth/common as eth_common, eth/keys,
system,
options
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
var node = newRpcHttpClient()
waitfor node.connect("localhost", Port(8545))
var res = waitfor node.wakuInfo()
echo "Waku info res: ", res

View File

@ -0,0 +1,36 @@
import
os, strutils, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
stew/byteutils,
libp2p/protobuf/minprotobuf,
eth/common as eth_common, eth/keys,
system,
options,
../waku_types
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
if paramCount() < 1:
echo "Please provide rpcPort as argument."
quit(1)
let rpcPort = Port(parseInt(paramStr(1)))
echo "Please enter your message:"
let raw_input = readLine(stdin)
let input = fmt"{raw_input}"
echo "Input is:", input
var node = newRpcHttpClient()
waitfor node.connect("localhost", rpcPort)
let pubSubTopic = "/waku/2/default-waku/proto"
let contentTopic = "foobar"
var wakuMessage = WakuMessage(payload: input.toBytes(), contentTopic: contentTopic)
# XXX This should be WakuMessage type, but need to setup JSON-RPC mapping for that to work
var raw_bytes = wakuMessage.encode().buffer
var res = waitfor node.wakuPublish2(pubSubTopic, raw_bytes)
echo "Waku publish response: ", res

View File

@ -0,0 +1,30 @@
import
os, strutils, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
libp2p/protobuf/minprotobuf,
libp2p/[peerinfo, multiaddress],
eth/common as eth_common, eth/keys,
system,
options
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
if paramCount() < 1:
echo "Please provide rpcPort as argument."
quit(1)
let rpcPort = Port(parseInt(paramStr(1)))
echo "Please enter your topic:"
let raw_input = readLine(stdin)
let input = fmt"{raw_input}"
echo "Input is:", input
var node = newRpcHttpClient()
waitfor node.connect("localhost", rpcPort)
var res = waitfor node.wakuQuery(@[input])
echo "Waku query response: ", res

View File

@ -0,0 +1,27 @@
import
os, strutils, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
libp2p/protobuf/minprotobuf,
eth/common as eth_common, eth/keys,
system,
options
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
if paramCount() < 1:
echo "Please provide rpcPort as argument."
quit(1)
let rpcPort = Port(parseInt(paramStr(1)))
var client = newRpcHttpClient()
waitfor client.connect("localhost", rpcPort)
echo "Subscribing"
# Subscribe to waku topic
var res = waitFor client.wakuSubscribe("/waku/2/default-waku/proto")
echo res

View File

@ -0,0 +1,32 @@
import
os, strutils, strformat, chronicles, json_rpc/[rpcclient, rpcserver], nimcrypto/sysrand,
libp2p/protobuf/minprotobuf,
libp2p/[peerinfo, multiaddress],
eth/common as eth_common, eth/keys,
system,
options
from strutils import rsplit
template sourceDir: string = currentSourcePath.rsplit(DirSep, 1)[0]
const sigWakuPath = sourceDir / "wakucallsigs.nim"
createRpcSigs(RpcHttpClient, sigWakuPath)
if paramCount() < 1:
echo "Please provide rpcPort as argument."
quit(1)
let rpcPort = Port(parseInt(paramStr(1)))
echo "Please enter your topic:"
let raw_input = readLine(stdin)
let input = fmt"{raw_input}"
echo "Input is:", input
var node = newRpcHttpClient()
waitfor node.connect("localhost", rpcPort)
let pubSubTopic = "/waku/2/default-waku/proto"
let contentTopic = "foobar"
var res = waitfor node.wakuSubscribeFilter(pubSubTopic, @[@[contentTopic]])
echo "Waku query response: ", res

View File

@ -0,0 +1,41 @@
# Alpha - Currently implemented in v2
proc waku_version(): string
# TODO Deprecate old waku_publish, requires adjusting simulation code etc.
proc waku_publish(topic: string, message: seq[byte]): bool
# TODO This should be properly done with rpc types, etc.
proc waku_publish2(topic: string, message: seq[byte]): bool
proc waku_subscribe(topic: string): bool
proc waku_query(topics: seq[string]): bool
proc waku_subscribe_filter(topic: string, contentFilters: seq[seq[string]]): bool
#proc waku_subscribe(topic: string, handler: Topichandler): bool
#
# TODO turn into WakuInfo object
proc waku_info(): string
# NYI
#proc waku_info(): WakuInfo
#proc waku_setMaxMessageSize(size: uint64): bool
#proc waku_setMinPoW(pow: float): bool
#proc waku_markTrustedPeer(enode: string): bool
#
#proc waku_newKeyPair(): Identifier
#proc waku_addPrivateKey(key: string): Identifier
#proc waku_deleteKeyPair(id: Identifier): bool
#proc waku_hasKeyPair(id: Identifier): bool
#proc waku_getPublicKey(id: Identifier): PublicKey
#proc waku_getPrivateKey(id: Identifier): PrivateKey
#
#proc waku_newSymKey(): Identifier
#proc waku_addSymKey(key: string): Identifier
#proc waku_generateSymKeyFromPassword(password: string): Identifier
#proc waku_hasSymKey(id: Identifier): bool
#proc waku_getSymKey(id: Identifier): SymKey
#proc waku_deleteSymKey(id: Identifier): bool
#
#proc waku_newMessageFilter(options: WakuFilterOptions): Identifier
#proc waku_deleteMessageFilter(id: Identifier): bool
#proc waku_getFilterMessages(id: Identifier): seq[WakuFilterMessage]
##proc waku_post(message: WakuPostMessage): bool
#
#proc wakusim_generateTraffic(amount: int): bool
#proc wakusim_generateRandomTraffic(amount: int): bool

View File

@ -0,0 +1,100 @@
import
std/options,
json_rpc/rpcserver,
nimcrypto/[sysrand, hmac, sha2],
eth/[common, rlp, keys, p2p],
../../protocol/waku_relay,
../../waku_types,
../../protocol/waku_store/waku_store,
../wakunode2
proc setupWakuRPC*(node: WakuNode, rpcsrv: RpcServer) =
rpcsrv.rpc("waku_version") do() -> string:
## Returns string of the current Waku protocol version.
result = WakuRelayCodec
# TODO: Implement symkey etc logic
rpcsrv.rpc("waku_publish") do(topic: string, payload: seq[byte]) -> bool:
let wakuRelay = node.wakuRelay
# XXX also future return type
# TODO: Shouldn't we really be doing WakuNode publish here?
debug "waku_publish", topic=topic, payload=payload
discard wakuRelay.publish(topic, payload)
return true
#if not result:
# raise newException(ValueError, "Message could not be posted")
rpcsrv.rpc("waku_publish2") do(topic: string, payload: seq[byte]) -> bool:
let msg = WakuMessage.init(payload)
if msg.isOk():
debug "waku_publish", msg=msg
else:
warn "waku_publish decode error", msg=msg
debug "waku_publish", topic=topic, payload=payload, msg=msg[]
await node.publish(topic, msg[])
return true
#if not result:
# raise newException(ValueError, "Message could not be posted")
# TODO: Handler / Identifier logic
rpcsrv.rpc("waku_subscribe") do(topic: string) -> bool:
debug "waku_subscribe", topic=topic
# XXX: Hacky in-line handler
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
debug "waku_subscribe handler", msg=msg
var readable_str = cast[string](msg[].payload)
info "Hit subscribe handler", topic=topic, msg=msg[], payload=readable_str
else:
warn "waku_subscribe decode error", msg=msg
info "waku_subscribe raw data string", str=cast[string](data)
# XXX: Can we make this context async to use await?
discard node.subscribe(topic, handler)
return true
#if not result:
# raise newException(ValueError, "Message could not be posted")
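# NOTE: waku_query and waku_subscribe_filter below take integer content topics,
# while wakucallsigs.nim still declares them as strings.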
rpcsrv.rpc("waku_query") do(topics: seq[int]) -> bool:
debug "waku_query"
# XXX: Hacky in-line handler
proc handler(response: HistoryResponse) {.gcsafe.} =
info "Hit response handler", messages=response.messages
var contentTopics = newSeq[ContentTopic]()
for topic in topics:
contentTopics.add(ContentTopic(topic))
await node.query(HistoryQuery(topics: contentTopics), handler)
return true
rpcsrv.rpc("waku_subscribe_filter") do(topic: string, contentFilters: seq[seq[int]]) -> bool:
debug "waku_subscribe_filter"
# XXX: Hacky in-line handler
proc handler(msg: WakuMessage) {.gcsafe, closure.} =
info "Hit subscribe response", message=msg
var filters = newSeq[ContentFilter]()
for topics in contentFilters:
var contentTopics = newSeq[ContentTopic]()
for topic in topics:
contentTopics.add(ContentTopic(topic))
filters.add(ContentFilter(topics: contentTopics))
await node.subscribe(FilterRequest(topic: topic, contentFilters: filters, subscribe: true), handler)
return true
rpcsrv.rpc("waku_info") do() -> string:
debug "waku_node_info"
let wakuInfo = node.info()
let listenStr = wakuInfo.listenStr
info "Listening on", full = listenStr
return listenStr

207
waku/v2/node/sqlite.nim Normal file
View File

@ -0,0 +1,207 @@
import
os,
sqlite3_abi,
chronos, chronicles, metrics, stew/results,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
libp2p/stream/connection,
stew/results, metrics
{.push raises: [Defect].}
# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth.
# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim
#
# Most of it is a direct copy; the main additions here are the generic
# `query`, `prepareStmt` and `exec` helpers.
type
DatabaseResult*[T] = Result[T, string]
Sqlite = ptr sqlite3
NoParams* = tuple
RawStmtPtr = ptr sqlite3_stmt
SqliteStmt*[Params; Result] = distinct RawStmtPtr
AutoDisposed[T: ptr|ref] = object
val: T
SqliteDatabase* = ref object of RootObj
env*: Sqlite
template dispose(db: Sqlite) =
discard sqlite3_close(db)
template dispose(db: RawStmtPtr) =
discard sqlite3_finalize(db)
proc release[T](x: var AutoDisposed[T]): T =
result = x.val
x.val = nil
proc disposeIfUnreleased[T](x: var AutoDisposed[T]) =
mixin dispose
if x.val != nil:
dispose(x.release)
template checkErr*(op, cleanup: untyped) =
if (let v = (op); v != SQLITE_OK):
cleanup
return err($sqlite3_errstr(v))
template checkErr*(op) =
checkErr(op): discard
proc init*(
T: type SqliteDatabase,
basePath: string,
name: string = "store",
readOnly = false,
inMemory = false): DatabaseResult[T] =
var env: AutoDisposed[ptr sqlite3]
defer: disposeIfUnreleased(env)
let
name =
if inMemory: ":memory:"
else: basepath / name & ".sqlite3"
flags =
if readOnly: SQLITE_OPEN_READONLY
else: SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE
if not inMemory:
try:
createDir(basePath)
except OSError, IOError:
return err("sqlite: cannot create database directory")
checkErr sqlite3_open_v2(name, addr env.val, flags.cint, nil)
template prepare(q: string, cleanup: untyped): ptr sqlite3_stmt =
var s: ptr sqlite3_stmt
checkErr sqlite3_prepare_v2(env.val, q, q.len.cint, addr s, nil):
cleanup
s
template checkExec(s: ptr sqlite3_stmt) =
if (let x = sqlite3_step(s); x != SQLITE_DONE):
discard sqlite3_finalize(s)
return err($sqlite3_errstr(x))
if (let x = sqlite3_finalize(s); x != SQLITE_OK):
return err($sqlite3_errstr(x))
template checkExec(q: string) =
let s = prepare(q): discard
checkExec(s)
template checkWalPragmaResult(journalModePragma: ptr sqlite3_stmt) =
if (let x = sqlite3_step(journalModePragma); x != SQLITE_ROW):
discard sqlite3_finalize(journalModePragma)
return err($sqlite3_errstr(x))
if (let x = sqlite3_column_type(journalModePragma, 0); x != SQLITE3_TEXT):
discard sqlite3_finalize(journalModePragma)
return err($sqlite3_errstr(x))
if (let x = sqlite3_column_text(journalModePragma, 0);
x != "memory" and x != "wal"):
discard sqlite3_finalize(journalModePragma)
return err("Invalid pragma result: " & $x)
# TODO: check current version and implement schema versioning
checkExec "PRAGMA user_version = 1;"
let journalModePragma = prepare("PRAGMA journal_mode = WAL;"): discard
checkWalPragmaResult(journalModePragma)
checkExec(journalModePragma)
ok(SqliteDatabase(
env: env.release
))
template prepare*(env: Sqlite, q: string, cleanup: untyped): ptr sqlite3_stmt =
var s: ptr sqlite3_stmt
checkErr sqlite3_prepare_v2(env, q, q.len.cint, addr s, nil):
cleanup
s
proc bindParam*(s: RawStmtPtr, n: int, val: auto): cint =
when val is openarray[byte]|seq[byte]:
if val.len > 0:
sqlite3_bind_blob(s, n.cint, unsafeAddr val[0], val.len.cint, nil)
else:
sqlite3_bind_blob(s, n.cint, nil, 0.cint, nil)
elif val is int32:
sqlite3_bind_int(s, n.cint, val)
elif val is uint32:
sqlite3_bind_int(s, int(n).cint, int(val).cint)
elif val is int64:
sqlite3_bind_int64(s, n.cint, val)
else:
{.fatal: "Please add support for this parameter type in bindParam".}
template bindParams(s: RawStmtPtr, params: auto) =
when params is tuple:
var i = 1
for param in fields(params):
checkErr bindParam(s, i, param)
inc i
else:
checkErr bindParam(s, 1, params)
proc exec*[P](s: SqliteStmt[P, void], params: P): DatabaseResult[void] =
let s = RawStmtPtr s
bindParams(s, params)
let res =
if (let v = sqlite3_step(s); v != SQLITE_DONE):
err($sqlite3_errstr(v))
else:
ok()
# release implicit transaction
discard sqlite3_reset(s) # same return information as step
discard sqlite3_clear_bindings(s) # no errors possible
res
type
DataProc* = proc(s: ptr sqlite3_stmt) {.closure.}
proc query*(db: SqliteDatabase, query: string, onData: DataProc): DatabaseResult[bool] =
var s = prepare(db.env, query): discard
try:
var gotResults = false
while true:
let v = sqlite3_step(s)
case v
of SQLITE_ROW:
onData(s)
gotResults = true
of SQLITE_DONE:
break
else:
return err($sqlite3_errstr(v))
return ok gotResults
finally:
# release implicit transaction
discard sqlite3_reset(s) # same return information as step
discard sqlite3_clear_bindings(s) # no errors possible
proc prepareStmt*(
db: SqliteDatabase,
stmt: string,
Params: type,
Res: type
): DatabaseResult[SqliteStmt[Params, Res]] =
var s: RawStmtPtr
checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
ok SqliteStmt[Params, Res](s)
proc close*(db: SqliteDatabase) =
discard sqlite3_close(db.env)
db[] = SqliteDatabase()[]
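# Usage sketch (not part of the original file): a minimal, illustrative round
# trip through the wrapper above using an in-memory database. The table and
# statements are made up for the example.
when isMainModule:
  proc sqliteDemo(): DatabaseResult[void] =
    let db = ?SqliteDatabase.init("", inMemory = true)
    defer: db.close()

    # create an illustrative table, then insert one row via a prepared statement
    discard ?db.query("CREATE TABLE kv (key BLOB, value BLOB);",
                      proc(s: ptr sqlite3_stmt) = discard)
    let insert = ?db.prepareStmt("INSERT INTO kv (key, value) VALUES (?, ?);",
                                 (seq[byte], seq[byte]), void)
    ?insert.exec((@[byte 1, 2, 3], @[byte 4, 5, 6]))

    # read it back; the callback runs once per result row
    discard ?db.query("SELECT value FROM kv;",
      proc(s: ptr sqlite3_stmt) = echo("value: ", sqlite3_column_bytes(s, 0), " bytes"))
    ok()

  echo sqliteDemo()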

View File

@ -0,0 +1,187 @@
import
strformat, os, osproc, net, chronicles, json,
libp2p/multiaddress,
libp2p/crypto/crypto,
libp2p/crypto/secp,
libp2p/peerinfo
# Fix ambiguous call error
import strutils except fromHex
const
defaults = "--log-level:TRACE --log-metrics --metrics-server --rpc"
wakuNodeBin = "build" / "wakunode2"
metricsDir = "metrics"
portOffset = 2
type
NodeInfo* = object
cmd: string
master: bool
address: string
shift: int
label: string
Topology = enum
Star,
FullMesh
# NOTE: Don't distinguish between node types here a la full node, light node etc
proc initNodeCmd(shift: int, staticNodes: seq[string] = @[], master = false, label: string): NodeInfo =
let
rng = crypto.newRng()
key = SkPrivateKey.random(rng[])
hkey = key.getBytes().toHex()
rkey = SkPrivateKey.init(fromHex(hkey))[] #assumes ok
privKey = PrivateKey(scheme: Secp256k1, skkey: rkey)
#privKey = PrivateKey.random(Secp256k1)
pubkey = privKey.getKey()[] #assumes ok
keys = KeyPair(seckey: privKey, pubkey: pubkey)
peerInfo = PeerInfo.init(privKey)
port = 60000 + shift
#DefaultAddr = "/ip4/127.0.0.1/tcp/55505"
address = "/ip4/127.0.0.1/tcp/" & $port
hostAddress = MultiAddress.init(address).tryGet()
info "Address", address
# TODO: Need to port shift
peerInfo.addrs.add(hostAddress)
let id = $peerInfo.peerId
info "PeerInfo", id = id, addrs = peerInfo.addrs
let listenStr = $peerInfo.addrs[0] & "/p2p/" & id
result.cmd = wakuNodeBin & " " & defaults & " "
result.cmd &= "--nodekey:" & hkey & " "
result.cmd &= "--ports-shift:" & $shift & " "
if staticNodes.len > 0:
for staticNode in staticNodes:
result.cmd &= "--staticnode:" & staticNode & " "
result.shift = shift
result.label = label
result.master = master
result.address = listenStr
info "Node command created.", cmd=result.cmd, address = result.address
proc starNetwork(amount: int): seq[NodeInfo] =
let masterNode = initNodeCmd(portOffset, master = true, label = "master node")
result.add(masterNode)
for i in 1..<amount:
result.add(initNodeCmd(portOffset + i, @[masterNode.address], label = "full node"))
proc fullMeshNetwork(amount: int): seq[NodeInfo] =
debug "amount", amount
for i in 0..<amount:
var staticnodes: seq[string]
for item in result:
staticnodes.add(item.address)
result.add(initNodeCmd(portOffset + i, staticnodes, label = "full node"))
proc generatePrometheusConfig(nodes: seq[NodeInfo], outputFile: string) =
var config = """
global:
scrape_interval: 1s
scrape_configs:
- job_name: "wakusim"
static_configs:"""
var count = 0
for node in nodes:
let port = 8008 + node.shift
config &= &"""
- targets: ['127.0.0.1:{port}']
labels:
node: '{count}'"""
count += 1
var (path, file) = splitPath(outputFile)
createDir(path)
writeFile(outputFile, config)
proc processGrafanaDashboard(nodes: int, inputFile: string,
outputFile: string) =
# from https://github.com/status-im/nim-beacon-chain/blob/master/tests/simulation/process_dashboard.nim
var
inputData = parseFile(inputFile)
panels = inputData["panels"].copy()
numPanels = len(panels)
gridHeight = 0
outputData = inputData
for panel in panels:
if panel["gridPos"]["x"].getInt() == 0:
gridHeight += panel["gridPos"]["h"].getInt()
outputData["panels"] = %* []
for nodeNum in 0 .. (nodes - 1):
var
nodePanels = panels.copy()
panelIndex = 0
for panel in nodePanels.mitems:
panel["title"] = %* replace(panel["title"].getStr(), "#0", "#" & $nodeNum)
panel["id"] = %* (panelIndex + (nodeNum * numPanels))
panel["gridPos"]["y"] = %* (panel["gridPos"]["y"].getInt() + (nodeNum * gridHeight))
var targets = panel["targets"]
for target in targets.mitems:
target["expr"] = %* replace(target["expr"].getStr(), "{node=\"0\"}", "{node=\"" & $nodeNum & "\"}")
outputData["panels"].add(panel)
panelIndex.inc()
outputData["uid"] = %* (outputData["uid"].getStr() & "a")
outputData["title"] = %* (outputData["title"].getStr() & " (all nodes)")
writeFile(outputFile, pretty(outputData))
when isMainModule:
# TODO: WakuNetworkConf
var nodes: seq[NodeInfo]
let topology = FullMesh
# Scenario xx2 14
let amount = 16
case topology:
of Star:
nodes = starNetwork(amount)
of FullMesh:
nodes = fullMeshNetwork(amount)
var staticnodes: seq[string]
for i in 0..<amount:
# TODO: could also select nodes randomly
staticnodes.add(nodes[i].address)
# Scenario xx1 - 16 full nodes, one app topic, full mesh, gossip
# Scenario xx2 - 14 full nodes, two edge nodes, one app topic, full mesh, gossip
# NOTE: Only connecting to one node here
#var nodesubseta: seq[string]
#var nodesubsetb: seq[string]
#nodesubseta.add(staticnodes[0])
#nodesubsetb.add(staticnodes[amount-1])
## XXX: Let's turn them into normal nodes
#nodes.add(initNodeCmd(0, nodesubseta, label = "edge node (A)"))
#nodes.add(initNodeCmd(1, nodesubsetb, label = "edge node (B)"))
var commandStr = "multitail -s 2 -M 0 -x \"Waku Simulation\""
var count = 0
var sleepDuration = 0
for node in nodes:
if topology in {Star}: #DiscoveryBased
sleepDuration = if node.master: 0
else: 1
commandStr &= &" -cT ansi -t 'node #{count} {node.label}' -l 'sleep {sleepDuration}; {node.cmd}; echo [node execution completed]; while true; do sleep 100; done'"
if topology == FullMesh:
sleepDuration += 1
count += 1
generatePrometheusConfig(nodes, metricsDir / "prometheus" / "prometheus.yml")
processGrafanaDashboard(nodes.len,
metricsDir / "waku-grafana-dashboard.json",
metricsDir / "waku-sim-all-nodes-grafana-dashboard.json")
let errorCode = execCmd(commandStr)
if errorCode != 0:
error "launch command failed", command=commandStr
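# For reference (illustrative, not part of the original file): with the default
# portOffset of 2 and just two nodes, the generated metrics/prometheus/prometheus.yml
# comes out roughly as below (scrape ports are 8008 + shift):
#
#   global:
#     scrape_interval: 1s
#
#   scrape_configs:
#     - job_name: "wakusim"
#       static_configs:
#       - targets: ['127.0.0.1:8010']
#         labels:
#           node: '0'
#       - targets: ['127.0.0.1:8011']
#         labels:
#           node: '1'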

View File

@ -0,0 +1,73 @@
import
std/options,
eth/keys,
eth/p2p/rlpx_protocols/whisper/whisper_types,
../waku_types
export whisper_types, waku_types, keys, options
type
KeyKind* = enum
Symmetric
Asymmetric
None
KeyInfo* = object
case kind*: KeyKind
of Symmetric:
symKey*: SymKey
of Asymmetric:
privKey*: PrivateKey
of None:
discard
# TODO:
# - This is using `DecodedPayload` from Waku v1 / Whisper and could be altered
# by making that a case object also, e.g. useful for the version 0, but
# especially in the future if there would be yet another version.
# - Also reworking that API to use Result instead of Option could make this
# cleaner.
# - For now this `KeyInfo` is a bit silly also, but perhaps with v2 or
# adjustments to Waku v1 encoding, it can be better.
proc decodePayload*(message: WakuMessage, keyInfo: KeyInfo):
WakuResult[DecodedPayload] =
case message.version
of 0:
return ok(DecodedPayload(payload:message.payload))
of 1:
case keyInfo.kind
of Symmetric:
let decoded = message.payload.decode(none[PrivateKey](),
some(keyInfo.symKey))
if decoded.isSome():
return ok(decoded.get())
else:
return err("Couldn't decrypt using symmetric key")
of Asymmetric:
let decoded = message.payload.decode(some(keyInfo.privkey),
none[SymKey]())
if decoded.isSome():
return ok(decoded.get())
else:
return err("Couldn't decrypt using asymmetric key")
of None:
discard
else:
return err("Unsupported WakuMessage version")
# TODO: same story as for `decodedPayload`, but then regarding the `Payload`
# object.
proc encode*(payload: Payload, version: uint32, rng: var BrHmacDrbgContext):
WakuResult[seq[byte]] =
case version
of 0:
# This is rather silly
return ok(payload.payload)
of 1:
let encoded = encode(rng, payload)
if encoded.isSome():
return ok(encoded.get())
else:
return err("Couldn't encode the payload")
else:
return err("Unsupported WakuMessage version")
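# Usage sketch (not part of the original file): an illustrative version-1 round
# trip with a symmetric key. The all-zero key and the message fields are made up
# for the example; a real application would use a shared key.
when isMainModule:
  var symKey: SymKey                     # illustrative only
  let rng = keys.newRng()

  let encoded = encode(Payload(payload: @[byte 1, 2, 3], symKey: some(symKey)),
                       version = 1, rng = rng[])
  if encoded.isOk():
    let msg = WakuMessage(payload: encoded.get(), version: 1)
    let decoded = decodePayload(msg, KeyInfo(kind: Symmetric, symKey: symKey))
    if decoded.isOk():
      echo "decoded payload: ", decoded.get().payload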

472
waku/v2/node/wakunode2.nim Normal file
View File

@ -0,0 +1,472 @@
import
std/[options, tables, strutils, sequtils],
chronos, chronicles, stew/shims/net as stewNet,
# TODO: Why do we need eth keys?
eth/keys,
libp2p/multiaddress,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
# NOTE For TopicHandler, solve with exports?
libp2p/protocols/pubsub/pubsub,
libp2p/peerinfo,
libp2p/standard_setup,
../protocol/[waku_relay, waku_filter, message_notifier],
../protocol/waku_store/waku_store,
../protocol/waku_swap/waku_swap,
../waku_types,
./message_store,
./sqlite
export waku_types
logScope:
topics = "wakunode"
# Default clientId
const clientId* = "Nimbus Waku v2 node"
# key and crypto modules different
type
KeyPair* = crypto.KeyPair
PublicKey* = crypto.PublicKey
PrivateKey* = crypto.PrivateKey
# TODO Get rid of this and use waku_types one
Topic* = waku_types.Topic
Message* = seq[byte]
# NOTE based on Eth2Node in NBC eth2_network.nim
WakuNode* = ref object of RootObj
switch*: Switch
wakuRelay*: WakuRelay
wakuStore*: WakuStore
wakuFilter*: WakuFilter
wakuSwap*: WakuSwap
peerInfo*: PeerInfo
libp2pTransportLoops*: seq[Future[void]]
# TODO Revisit messages field indexing as well as if this should be Message or WakuMessage
messages*: seq[(Topic, WakuMessage)]
filters*: Filters
subscriptions*: MessageNotificationSubscriptions
rng*: ref BrHmacDrbgContext
# NOTE Any difference here in Waku vs Eth2?
# E.g. Devp2p/Libp2p support, etc.
#func asLibp2pKey*(key: keys.PublicKey): PublicKey =
# PublicKey(scheme: Secp256k1, skkey: secp.SkPublicKey(key))
func asEthKey*(key: PrivateKey): keys.PrivateKey =
keys.PrivateKey(key.skkey)
proc initAddress(T: type MultiAddress, str: string): T =
let address = MultiAddress.init(str).tryGet()
if IPFS.match(address) and matchPartial(multiaddress.TCP, address):
result = address
else:
raise newException(ValueError,
"Invalid bootstrap node multi-address")
proc removeContentFilters(filters: var Filters, contentFilters: seq[ContentFilter]) {.gcsafe.} =
# Flatten all unsubscribe topics into a single seq
var unsubscribeTopics: seq[ContentTopic]
for cf in contentFilters:
unsubscribeTopics = unsubscribeTopics.concat(cf.topics)
debug "unsubscribing", unsubscribeTopics=unsubscribeTopics
var rIdToRemove: seq[string] = @[]
for rId, f in filters.mpairs:
# Iterate filter entries to remove matching content topics
for cf in f.contentFilters.mitems:
# Iterate content filters in filter entry
cf.topics.keepIf(proc (t: auto): bool = t notin unsubscribeTopics)
# make sure we delete the content filter
# if no more topics are left
f.contentFilters.keepIf(proc (cf: auto): bool = cf.topics.len > 0)
if f.contentFilters.len == 0:
rIdToRemove.add(rId)
# make sure we delete the filter entry
# if no more content filters left
for rId in rIdToRemove:
filters.del(rId)
debug "filters modified", filters=filters
template tcpEndPoint(address, port): auto =
MultiAddress.init(address, tcpProtocol, port)
## Public API
##
proc init*(T: type WakuNode, nodeKey: crypto.PrivateKey,
bindIp: ValidIpAddress, bindPort: Port,
extIp = none[ValidIpAddress](), extPort = none[Port]()): T =
## Creates a Waku Node.
##
## Status: Implemented.
##
let
rng = crypto.newRng()
hostAddress = tcpEndPoint(bindIp, bindPort)
announcedAddresses = if extIp.isNone() or extPort.isNone(): @[]
else: @[tcpEndPoint(extIp.get(), extPort.get())]
peerInfo = PeerInfo.init(nodekey)
info "Initializing networking", hostAddress,
announcedAddresses
# XXX: Add this when we create node or start it?
peerInfo.addrs.add(hostAddress)
var switch = newStandardSwitch(some(nodekey), hostAddress,
transportFlags = {ServerFlags.ReuseAddr}, rng = rng)
# TODO Untested - verify behavior after switch interface change
# More like this:
# let pubsub = GossipSub.init(
# switch = switch,
# msgIdProvider = msgIdProvider,
# triggerSelf = true, sign = false,
# verifySignature = false).PubSub
result = WakuNode(
switch: switch,
rng: rng,
peerInfo: peerInfo,
subscriptions: newTable[string, MessageNotificationSubscription](),
filters: initTable[string, Filter]()
)
proc start*(node: WakuNode) {.async.} =
## Starts a created Waku Node.
##
## Status: Implemented.
##
node.libp2pTransportLoops = await node.switch.start()
# TODO Get this from WakuNode obj
let peerInfo = node.peerInfo
info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
## XXX: this should be /ip4..., / stripped?
info "Listening on", full = listenStr
proc stop*(node: WakuNode) {.async.} =
if not node.wakuRelay.isNil:
await node.wakuRelay.stop()
await node.switch.stop()
proc subscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) {.async.} =
## Subscribes to a PubSub topic. Triggers handler when receiving messages on
## this topic. TopicHandler is a method that takes a topic and some data.
##
## NOTE The data field SHOULD be decoded as a WakuMessage.
## Status: Implemented.
info "subscribe", topic=topic
let wakuRelay = node.wakuRelay
await wakuRelay.subscribe(topic, handler)
proc subscribe*(node: WakuNode, request: FilterRequest, handler: ContentFilterHandler) {.async, gcsafe.} =
## Registers for messages that match a specific filter. Triggers the handler whenever a message is received.
## FilterHandler is a method that takes a MessagePush.
##
## Status: Implemented.
# Sanity check for well-formed subscribe FilterRequest
doAssert(request.subscribe, "invalid subscribe request")
info "subscribe content", filter=request
var id = generateRequestId(node.rng)
if node.wakuFilter.isNil == false:
# @TODO: ERROR HANDLING
id = await node.wakuFilter.subscribe(request)
node.filters[id] = Filter(contentFilters: request.contentFilters, handler: handler)
proc unsubscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) {.async.} =
## Unsubscribes a handler from a PubSub topic.
##
## Status: Implemented.
info "unsubscribe", topic=topic
let wakuRelay = node.wakuRelay
await wakuRelay.unsubscribe(@[(topic, handler)])
proc unsubscribeAll*(node: WakuNode, topic: Topic) {.async.} =
## Unsubscribes all handlers registered on a specific PubSub topic.
##
## Status: Implemented.
info "unsubscribeAll", topic=topic
let wakuRelay = node.wakuRelay
await wakuRelay.unsubscribeAll(topic)
proc unsubscribe*(node: WakuNode, request: FilterRequest) {.async, gcsafe.} =
## Unsubscribe from a content filter.
##
## Status: Implemented.
# Sanity check for well-formed unsubscribe FilterRequest
doAssert(request.subscribe == false, "invalid unsubscribe request")
info "unsubscribe content", filter=request
await node.wakuFilter.unsubscribe(request)
node.filters.removeContentFilters(request.contentFilters)
proc publish*(node: WakuNode, topic: Topic, message: WakuMessage) {.async, gcsafe.} =
## Publish a `WakuMessage` to a PubSub topic. `WakuMessage` should contain a
## `contentTopic` field for light node functionality. This field may also
## be omitted.
##
## Status: Implemented.
##
let wakuRelay = node.wakuRelay
debug "publish", topic=topic, contentTopic=message.contentTopic
let data = message.encode().buffer
discard await wakuRelay.publish(topic, data)
proc query*(node: WakuNode, query: HistoryQuery, handler: QueryHandlerFunc) {.async, gcsafe.} =
## Queries known nodes for historical messages. Triggers the handler whenever a response is received.
## QueryHandlerFunc is a method that takes a HistoryResponse.
##
## Status: Implemented.
# TODO Once waku swap is less experimental, this can be simplified
if node.wakuSwap.isNil:
debug "Using default query"
await node.wakuStore.query(query, handler)
else:
debug "Using SWAPAccounting query"
# TODO wakuSwap now part of wakuStore object
await node.wakuStore.queryWithAccounting(query, handler)
# TODO Extend with more relevant info: topics, peers, memory usage, online time, etc
proc info*(node: WakuNode): WakuInfo =
## Returns information about the Node, such as what multiaddress it can be reached at.
##
## Status: Implemented.
##
# TODO Generalize this for other type of multiaddresses
let peerInfo = node.peerInfo
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
let wakuInfo = WakuInfo(listenStr: listenStr)
return wakuInfo
proc mountFilter*(node: WakuNode) =
info "mounting filter"
proc filterHandler(requestId: string, msg: MessagePush) {.gcsafe.} =
info "push received"
for message in msg.messages:
node.filters.notify(message, requestId)
node.wakuFilter = WakuFilter.init(node.switch, node.rng, filterHandler)
node.switch.mount(node.wakuFilter)
node.subscriptions.subscribe(WakuFilterCodec, node.wakuFilter.subscription())
# NOTE: If using the swap protocol, it must be mounted before store. This is
# because store is using a reference to the swap protocol.
proc mountSwap*(node: WakuNode) =
info "mounting swap"
node.wakuSwap = WakuSwap.init(node.switch, node.rng)
node.switch.mount(node.wakuSwap)
# NYI - Do we need this?
#node.subscriptions.subscribe(WakuSwapCodec, node.wakuSwap.subscription())
proc mountStore*(node: WakuNode, store: MessageStore = nil) =
info "mounting store"
if node.wakuSwap.isNil:
debug "mounting store without swap"
node.wakuStore = WakuStore.init(node.switch, node.rng, store)
else:
debug "mounting store with swap"
node.wakuStore = WakuStore.init(node.switch, node.rng, store, node.wakuSwap)
node.switch.mount(node.wakuStore)
node.subscriptions.subscribe(WakuStoreCodec, node.wakuStore.subscription())
proc mountRelay*(node: WakuNode, topics: seq[string] = newSeq[string]()) {.async, gcsafe.} =
let wakuRelay = WakuRelay.init(
switch = node.switch,
# Use default
#msgIdProvider = msgIdProvider,
triggerSelf = true,
sign = false,
verifySignature = false
)
node.wakuRelay = wakuRelay
node.switch.mount(wakuRelay)
info "mounting relay"
proc relayHandler(topic: string, data: seq[byte]) {.async, gcsafe.} =
let msg = WakuMessage.init(data)
if msg.isOk():
node.filters.notify(msg.value(), "")
await node.subscriptions.notify(topic, msg.value())
await node.wakuRelay.subscribe("/waku/2/default-waku/proto", relayHandler)
for topic in topics:
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
debug "Hit handler", topic=topic, data=data
# XXX: Is using discard here fine? Not sure if we want init to be async?
# Can also move this to the start proc, possibly wiser?
discard node.subscribe(topic, handler)
## Helpers
proc parsePeerInfo(address: string): PeerInfo =
let multiAddr = MultiAddress.initAddress(address)
let parts = address.split("/")
return PeerInfo.init(parts[^1], [multiAddr])
proc dialPeer*(n: WakuNode, address: string) {.async.} =
info "dialPeer", address = address
# XXX: This turns ipfs into p2p, not quite sure why
let remotePeer = parsePeerInfo(address)
info "Dialing peer", ma = remotePeer.addrs[0]
# NOTE This is dialing on WakuRelay protocol specifically
# TODO Keep track of conn and connected state somewhere (WakuRelay?)
#p.conn = await p.switch.dial(remotePeer, WakuRelayCodec)
#p.connected = true
discard await n.switch.dial(remotePeer.peerId, remotePeer.addrs, WakuRelayCodec)
info "Post switch dial"
proc setStorePeer*(n: WakuNode, address: string) =
info "dialPeer", address = address
let remotePeer = parsePeerInfo(address)
n.wakuStore.setPeer(remotePeer)
proc setFilterPeer*(n: WakuNode, address: string) =
info "dialPeer", address = address
let remotePeer = parsePeerInfo(address)
n.wakuFilter.setPeer(remotePeer)
proc connectToNodes*(n: WakuNode, nodes: seq[string]) {.async.} =
for nodeId in nodes:
info "connectToNodes", node = nodeId
# XXX: This seems...brittle
await dialPeer(n, nodeId)
# The issue seems to be around peers not being fully connected when
# trying to subscribe. So what we do is sleep to guarantee nodes are
# fully connected.
#
# This issue was known to Dmitry on nim-libp2p and may be resolvable
# later.
await sleepAsync(5.seconds)
proc connectToNodes*(n: WakuNode, nodes: seq[PeerInfo]) {.async.} =
for peerInfo in nodes:
info "connectToNodes", peer = peerInfo
discard await n.switch.dial(peerInfo.peerId, peerInfo.addrs, WakuRelayCodec)
# The issue seems to be around peers not being fully connected when
# trying to subscribe. So what we do is sleep to guarantee nodes are
# fully connected.
#
# This issue was known to Dmitry on nim-libp2p and may be resolvable
# later.
await sleepAsync(5.seconds)
when isMainModule:
import
confutils, json_rpc/rpcserver, metrics,
./config, ./rpc/wakurpc,
../../common/utils/nat
proc startRpc(node: WakuNode, rpcIp: ValidIpAddress, rpcPort: Port) =
let
ta = initTAddress(rpcIp, rpcPort)
rpcServer = newRpcHttpServer([ta])
setupWakuRPC(node, rpcServer)
rpcServer.start()
info "RPC Server started", ta
proc startMetricsServer(serverIp: ValidIpAddress, serverPort: Port) =
info "Starting metrics HTTP server", serverIp, serverPort
metrics.startHttpServer($serverIp, serverPort)
proc startMetricsLog() =
proc logMetrics(udata: pointer) {.closure, gcsafe.} =
{.gcsafe.}:
# TODO: libp2p_pubsub_peers is not public, so we need to make this either
# public in libp2p or do our own peer counting after all.
let
totalMessages = total_messages.value
info "Node metrics", totalMessages
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
discard setTimer(Moment.fromNow(2.seconds), logMetrics)
let
conf = WakuNodeConf.load()
(extIp, extTcpPort, extUdpPort) = setupNat(conf.nat, clientId,
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(conf.udpPort) + conf.portsShift))
node = WakuNode.init(conf.nodeKey, conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift), extIp, extTcpPort)
waitFor node.start()
if conf.swap:
mountSwap(node)
# TODO Set swap peer, for now should be same as store peer
if conf.store:
var store: MessageStore
if conf.dbpath != "":
let dbRes = SqliteDatabase.init(conf.dbpath)
if dbRes.isErr:
warn "failed to init database", err = dbRes.error
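# NOTE: a failed database init is only logged here; dbRes.value below assumes it succeeded.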
let res = MessageStore.init(dbRes.value)
if res.isErr:
warn "failed to init MessageStore", err = res.error
else:
store = res.value
mountStore(node, store)
if conf.filter:
mountFilter(node)
if conf.relay:
waitFor mountRelay(node, conf.topics.split(" "))
if conf.staticnodes.len > 0:
waitFor connectToNodes(node, conf.staticnodes)
if conf.storenode != "":
setStorePeer(node, conf.storenode)
if conf.filternode != "":
setFilterPeer(node, conf.filternode)
if conf.rpc:
startRpc(node, conf.rpcAddress, Port(conf.rpcPort + conf.portsShift))
if conf.logMetrics:
startMetricsLog()
when defined(insecure):
if conf.metricsServer:
startMetricsServer(conf.metricsServerAddress,
Port(conf.metricsServerPort + conf.portsShift))
runForever()
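# Usage sketch (not part of the original file): driving the public API above
# directly from Nim instead of over RPC. Key generation mirrors the simulation
# script; the address, port, topic and payload are illustrative.
#
# import libp2p/crypto/secp
#
# proc runDemo() {.async.} =
#   let
#     rng = crypto.newRng()
#     nodeKey = crypto.PrivateKey(scheme: Secp256k1, skkey: SkPrivateKey.random(rng[]))
#     node = WakuNode.init(nodeKey, ValidIpAddress.init("0.0.0.0"), Port(60000))
#   await node.start()
#   await node.mountRelay()
#
#   proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
#     let msg = WakuMessage.init(data)
#     if msg.isOk():
#       info "message received", topic=topic, payload=cast[string](msg.value().payload)
#   await node.subscribe("/waku/2/default-waku/proto", handler)
#
#   await node.publish("/waku/2/default-waku/proto",
#                      WakuMessage(payload: cast[seq[byte]]("hello"), version: 0))
#
# waitFor runDemo()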

View File

@ -0,0 +1,3 @@
# Waku v2 protocol
This folder contains implementations of [Waku v2 protocols](https://specs.vac.dev/specs/waku/v2/waku-v2.html).

Some files were not shown because too many files have changed in this diff.