diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml
index cf6711260..b138a2248 100644
--- a/.github/workflows/pre-release.yml
+++ b/.github/workflows/pre-release.yml
@@ -76,14 +76,14 @@ jobs:
tar -cvzf ${{steps.vars.outputs.nwakutools}} ./build/wakucanary ./build/networkmonitor
- name: upload artifacts
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: wakunode2
path: ${{steps.vars.outputs.nwaku}}
retention-days: 2
- name: upload artifacts
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: wakutools
path: ${{steps.vars.outputs.nwakutools}}
diff --git a/.gitignore b/.gitignore
index 69106b9df..7430c3e99 100644
--- a/.gitignore
+++ b/.gitignore
@@ -76,3 +76,6 @@ coverage_html_report/
.qmake.stash
main-qt
waku_handler.moc.cpp
+
+# Nix build result
+result
diff --git a/.gitmodules b/.gitmodules
index bde56a76e..b7e52550a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -168,7 +168,7 @@
path = vendor/db_connector
url = https://github.com/nim-lang/db_connector.git
ignore = untracked
- branch = master
+ branch = devel
[submodule "vendor/nph"]
ignore = untracked
branch = master
@@ -179,16 +179,6 @@
url = https://github.com/status-im/nim-minilru.git
ignore = untracked
branch = master
-[submodule "vendor/nim-quic"]
- path = vendor/nim-quic
- url = https://github.com/status-im/nim-quic.git
- ignore = untracked
- branch = master
-[submodule "vendor/nim-ngtcp2"]
- path = vendor/nim-ngtcp2
- url = https://github.com/vacp2p/nim-ngtcp2.git
- ignore = untracked
- branch = master
[submodule "vendor/waku-rlnv2-contract"]
path = vendor/waku-rlnv2-contract
url = https://github.com/waku-org/waku-rlnv2-contract.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ec377ef5b..151392f1b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,22 @@
+## v0.35.1 (2025-03-30)
+
+### Bug fixes
+
+* Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b))
+
+**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e.,
+the one that is passed through this CLI: `--rln-relay-tree-path`.
+
+This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):
+| Protocol | Spec status | Protocol id |
+| ---: | :---: | :--- |
+| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` |
+| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` |
+| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` |
+| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` |
+| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
+| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
+
## v0.35.0 (2025-03-03)
### Notes
diff --git a/Dockerfile b/Dockerfile
index 84e457767..8a1a743c9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# BUILD NIM APP ----------------------------------------------------------------
-FROM rust:1.77.1-alpine3.18 AS nim-build
+FROM rust:1.81.0-alpine3.19 AS nim-build
ARG NIMFLAGS
ARG MAKE_TARGET=wakunode2
diff --git a/Makefile b/Makefile
index 473bb7801..d15668673 100644
--- a/Makefile
+++ b/Makefile
@@ -4,8 +4,8 @@
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
-BUILD_SYSTEM_DIR := vendor/nimbus-build-system
-EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor
+export BUILD_SYSTEM_DIR := vendor/nimbus-build-system
+export EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor
LINK_PCRE := 0
FORMAT_MSG := "\\x1B[95mFormatting:\\x1B[39m"
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
@@ -40,8 +40,8 @@ ifeq ($(detected_OS),Windows)
NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib"
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc"
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream"
-
- LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
+
+ LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)")
endif
@@ -83,7 +83,7 @@ HEAPTRACKER_INJECT ?= 0
ifeq ($(HEAPTRACKER), 1)
# Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch
DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support
-TARGET := prod-with-heaptrack
+TARGET := heaptrack-build
ifeq ($(HEAPTRACKER_INJECT), 1)
# the Nim compiler will load 'libheaptrack_inject.so'
@@ -152,6 +152,12 @@ endif
clean: | clean-libbacktrace
+### Create nimble links (used when building with Nix)
+
+nimbus-build-system-nimble-dir:
+ NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \
+ PWD_CMD="$(PWD)" \
+ $(CURDIR)/scripts/generate_nimble_links.sh
##################
## RLN ##
@@ -159,7 +165,7 @@ clean: | clean-libbacktrace
.PHONY: librln
LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
-LIBRLN_VERSION := v0.5.1
+LIBRLN_VERSION := v0.7.0
ifeq ($(detected_OS),Windows)
LIBRLN_FILE := rln.lib
@@ -334,6 +340,17 @@ docker-image:
--target $(TARGET) \
--tag $(DOCKER_IMAGE_NAME) .
+docker-quick-image: MAKE_TARGET ?= wakunode2
+docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
+docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
+docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
+docker-quick-image: | build deps librln wakunode2
+ docker build \
+ --build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
+ --tag $(DOCKER_IMAGE_NAME) \
+ --file docker/binaries/Dockerfile.bn.amd64 \
+ .
+
docker-push:
docker push $(DOCKER_IMAGE_NAME)
@@ -361,6 +378,14 @@ docker-liteprotocoltester:
--file apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile \
.
+docker-quick-liteprotocoltester: DOCKER_LPT_TAG ?= latest
+docker-quick-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG)
+docker-quick-liteprotocoltester: | liteprotocoltester
+ docker build \
+ --tag $(DOCKER_LPT_NAME) \
+ --file apps/liteprotocoltester/Dockerfile.liteprotocoltester \
+ .
+
docker-liteprotocoltester-push:
docker push $(DOCKER_LPT_NAME)
diff --git a/README.md b/README.md
index 9d8b58110..057d0b622 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,13 @@ The standard developer tools, including a C compiler, GNU Make, Bash, and Git. M
> In some distributions (Fedora linux for example), you may need to install `which` utility separately. Nimbus build system is relying on it.
+You'll also need an installation of Rust and its toolchain (specifically `rustc` and `cargo`).
+The easiest way to install these, is using `rustup`:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
### Wakunode
```bash
@@ -126,6 +133,9 @@ Binary will be created as `.bin` under the `build` d
make test/tests/common/test_enr_builder.nim
```
+### Testing against `js-waku`
+Refer to [js-waku repo](https://github.com/waku-org/js-waku/tree/master/packages/tests) for instructions.
+
## Formatting
Nim files are expected to be formatted using the [`nph`](https://github.com/arnetheduck/nph) version present in `vendor/nph`.
diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim
index 3723291e3..127a761c0 100644
--- a/apps/chat2/chat2.nim
+++ b/apps/chat2/chat2.nim
@@ -381,7 +381,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
if conf.relay:
let shards =
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
- await node.mountRelay(shards)
+ (await node.mountRelay(shards)).isOkOr:
+ echo "failed to mount relay: " & error
+ return
await node.mountLibp2pPing()
@@ -535,7 +537,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
node.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler))
- )
+ ).isOkOr:
+ error "failed to subscribe to pubsub topic",
+ topic = DefaultPubsubTopic, error = error
if conf.rlnRelay:
info "WakuRLNRelay is enabled"
@@ -553,14 +557,18 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
echo "rln-relay preparation is in progress..."
let rlnConf = WakuRlnConfig(
- rlnRelayDynamic: conf.rlnRelayDynamic,
- rlnRelayCredIndex: conf.rlnRelayCredIndex,
- rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
- rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
- rlnRelayCredPath: conf.rlnRelayCredPath,
- rlnRelayCredPassword: conf.rlnRelayCredPassword,
- rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
- rlnEpochSizeSec: conf.rlnEpochSizeSec,
+ dynamic: conf.rlnRelayDynamic,
+ credIndex: conf.rlnRelayCredIndex,
+ chainId: conf.rlnRelayChainId,
+ ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
+ creds: some(
+ RlnRelayCreds(
+ path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword
+ )
+ ),
+ userMessageLimit: conf.rlnRelayUserMessageLimit,
+ epochSizeSec: conf.rlnEpochSizeSec,
+ treePath: conf.rlnRelayTreePath,
)
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))
diff --git a/apps/chat2/config_chat2.nim b/apps/chat2/config_chat2.nim
index 4bdc0d586..8cc525208 100644
--- a/apps/chat2/config_chat2.nim
+++ b/apps/chat2/config_chat2.nim
@@ -18,7 +18,8 @@ type
prod
test
- EthRpcUrl = distinct string
+ EthRpcUrl* = distinct string
+
Chat2Conf* = object ## General node config
logLevel* {.
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
@@ -213,6 +214,13 @@ type
name: "rln-relay"
.}: bool
+ rlnRelayChainId* {.
+ desc:
+ "Chain ID of the provided contract (optional, will fetch from RPC provider if not used)",
+ defaultValue: 0,
+ name: "rln-relay-chain-id"
+ .}: uint
+
rlnRelayCredPath* {.
desc: "The path for peristing rln-relay credential",
defaultValue: "",
@@ -241,11 +249,12 @@ type
name: "rln-relay-id-commitment-key"
.}: string
- rlnRelayEthClientAddress* {.
- desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
- defaultValue: "http://localhost:8540/",
+ ethClientUrls* {.
+ desc:
+ "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
+ defaultValue: newSeq[EthRpcUrl](0),
name: "rln-relay-eth-client-address"
- .}: EthRpcUrl
+ .}: seq[EthRpcUrl]
rlnRelayEthContractAddress* {.
desc: "Address of membership contract on an Ethereum testnet",
@@ -273,6 +282,12 @@ type
name: "rln-relay-epoch-sec"
.}: uint64
+ rlnRelayTreePath* {.
+ desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
+ defaultValue: "",
+ name: "rln-relay-tree-path"
+ .}: string
+
# NOTE: Keys are different in nim-libp2p
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
try:
diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim
index 96782360b..7a7a5d08f 100644
--- a/apps/chat2bridge/chat2bridge.nim
+++ b/apps/chat2bridge/chat2bridge.nim
@@ -215,7 +215,10 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
# Always mount relay for bridge
# `triggerSelf` is false on a `bridge` to avoid duplicates
- await cmb.nodev2.mountRelay()
+ (await cmb.nodev2.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
+ return
+
cmb.nodev2.wakuRelay.triggerSelf = false
# Bridging
@@ -229,7 +232,9 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
except:
error "exception in relayHandler: " & getCurrentExceptionMsg()
- cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error
+ return
proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =
info "Stopping Chat2MatterBridge"
diff --git a/apps/liteprotocoltester/.env b/apps/liteprotocoltester/.env
index 4f7c49976..0330284e1 100644
--- a/apps/liteprotocoltester/.env
+++ b/apps/liteprotocoltester/.env
@@ -12,16 +12,16 @@ MIN_MESSAGE_SIZE=15Kb
MAX_MESSAGE_SIZE=145Kb
## for wakusim
-#PUBSUB=/waku/2/rs/66/0
+#SHARD=0
#CONTENT_TOPIC=/tester/2/light-pubsub-test/wakusim
#CLUSTER_ID=66
## for status.prod
-PUBSUB=/waku/2/rs/16/32
+#SHARDS=32
CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
CLUSTER_ID=16
## for TWN
-#PUBSUB=/waku/2/rs/1/4
+#SHARD=4
#CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
#CLUSTER_ID=1
diff --git a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile
index cee1929ce..6d789ebd1 100644
--- a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile
+++ b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile
@@ -55,6 +55,8 @@ RUN chmod +x /usr/bin/liteprotocoltester
FROM base_lpt AS standalone_lpt
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node.sh /usr/bin/
+COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
+
RUN chmod +x /usr/bin/run_tester_node.sh
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]
diff --git a/apps/liteprotocoltester/README.md b/apps/liteprotocoltester/README.md
index eff025969..ea02ec1c1 100644
--- a/apps/liteprotocoltester/README.md
+++ b/apps/liteprotocoltester/README.md
@@ -127,7 +127,7 @@ Run a SENDER role liteprotocoltester and a RECEIVER role one on different termin
| ---: | :--- | :--- |
| NUM_MESSAGES | Number of message to publish, 0 means infinite | 120 |
| MESSAGE_INTERVAL_MILLIS | Frequency of messages in milliseconds | 1000 |
-| PUBSUB | Used pubsub_topic for testing | /waku/2/rs/66/0 |
+| SHARD | Used shard for testing | 0 |
| CONTENT_TOPIC | content_topic for testing | /tester/1/light-pubsub-example/proto |
| CLUSTER_ID | cluster_id of the network | 16 |
| START_PUBLISHING_AFTER_SECS | Delay in seconds before starting to publish to let service node connected | 5 |
@@ -272,7 +272,7 @@ export NUM_MESSAGES=200
export MESSAGE_INTERVAL_MILLIS=1000
export MIN_MESSAGE_SIZE=15Kb
export MAX_MESSAGE_SIZE=145Kb
-export PUBSUB=/waku/2/rs/16/32
+export SHARD=32
export CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
export CLUSTER_ID=16
@@ -307,7 +307,7 @@ export NUM_MESSAGES=300
export MESSAGE_INTERVAL_MILLIS=7000
export MIN_MESSAGE_SIZE=15Kb
export MAX_MESSAGE_SIZE=145Kb
-export PUBSUB=/waku/2/rs/1/4
+export SHARD=4
export CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
export CLUSTER_ID=1
diff --git a/apps/liteprotocoltester/diagnose_connections.nim b/apps/liteprotocoltester/diagnose_connections.nim
index 788f83c68..a4007d59c 100644
--- a/apps/liteprotocoltester/diagnose_connections.nim
+++ b/apps/liteprotocoltester/diagnose_connections.nim
@@ -42,7 +42,7 @@ proc `$`*(cap: Capabilities): string =
proc allPeers(pm: PeerManager): string =
var allStr: string = ""
- for idx, peer in pm.wakuPeerStore.peers():
+ for idx, peer in pm.switch.peerStore.peers():
allStr.add(
" " & $idx & ". | " & constructMultiaddrStr(peer) & " | agent: " &
peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " &
@@ -51,10 +51,10 @@ proc allPeers(pm: PeerManager): string =
return allStr
proc logSelfPeers*(pm: PeerManager) =
- let selfLighpushPeers = pm.wakuPeerStore.getPeersByProtocol(WakuLightPushCodec)
- let selfRelayPeers = pm.wakuPeerStore.getPeersByProtocol(WakuRelayCodec)
- let selfFilterPeers = pm.wakuPeerStore.getPeersByProtocol(WakuFilterSubscribeCodec)
- let selfPxPeers = pm.wakuPeerStore.getPeersByProtocol(WakuPeerExchangeCodec)
+ let selfLighpushPeers = pm.switch.peerStore.getPeersByProtocol(WakuLightPushCodec)
+ let selfRelayPeers = pm.switch.peerStore.getPeersByProtocol(WakuRelayCodec)
+ let selfFilterPeers = pm.switch.peerStore.getPeersByProtocol(WakuFilterSubscribeCodec)
+ let selfPxPeers = pm.switch.peerStore.getPeersByProtocol(WakuPeerExchangeCodec)
let printable = catch:
"""*------------------------------------------------------------------------------------------*
diff --git a/apps/liteprotocoltester/docker-compose-on-simularor.yml b/apps/liteprotocoltester/docker-compose-on-simularor.yml
index c63a294f2..9e899f78f 100644
--- a/apps/liteprotocoltester/docker-compose-on-simularor.yml
+++ b/apps/liteprotocoltester/docker-compose-on-simularor.yml
@@ -16,7 +16,7 @@ x-rln-environment: &rln_env
x-test-running-conditions: &test_running_conditions
NUM_MESSAGES: ${NUM_MESSAGES:-120}
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
- PUBSUB: ${PUBSUB:-/waku/2/rs/66/0}
+ SHARD: ${SHARD:-0}
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
CLUSTER_ID: ${CLUSTER_ID:-66}
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}
diff --git a/apps/liteprotocoltester/docker-compose.yml b/apps/liteprotocoltester/docker-compose.yml
index afd2f1e72..0effbf8f0 100644
--- a/apps/liteprotocoltester/docker-compose.yml
+++ b/apps/liteprotocoltester/docker-compose.yml
@@ -16,7 +16,7 @@ x-rln-environment: &rln_env
x-test-running-conditions: &test_running_conditions
NUM_MESSAGES: ${NUM_MESSAGES:-120}
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
- PUBSUB: ${PUBSUB:-/waku/2/rs/66/0}
+ SHARD: ${SHARD:-0}
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
CLUSTER_ID: ${CLUSTER_ID:-66}
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}
diff --git a/apps/liteprotocoltester/filter_subscriber.nim b/apps/liteprotocoltester/filter_subscriber.nim
index 143e0ca80..fbb11c92e 100644
--- a/apps/liteprotocoltester/filter_subscriber.nim
+++ b/apps/liteprotocoltester/filter_subscriber.nim
@@ -130,7 +130,9 @@ proc setupAndSubscribe*(
var stats: PerPeerStatistics
actualFilterPeer = servicePeer
- let pushHandler = proc(pubsubTopic: PubsubTopic, message: WakuMessage) {.async.} =
+ let pushHandler = proc(
+ pubsubTopic: PubsubTopic, message: WakuMessage
+ ): Future[void] {.async, closure.} =
let payloadStr = string.fromBytes(message.payload)
let testerMessage = js.Json.decode(payloadStr, ProtocolTesterMessage)
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex
@@ -163,7 +165,7 @@ proc setupAndSubscribe*(
if conf.numMessages > 0 and
waitFor stats.checkIfAllMessagesReceived(maxWaitForLastMessage):
- waitFor unsubscribe(wakuNode, conf.pubsubTopics[0], conf.contentTopics[0])
+ waitFor unsubscribe(wakuNode, conf.getPubsubTopic(), conf.contentTopics[0])
info "All messages received. Exiting."
## for gracefull shutdown through signal hooks
@@ -176,5 +178,5 @@ proc setupAndSubscribe*(
# Start maintaining subscription
asyncSpawn maintainSubscription(
- wakuNode, conf.pubsubTopics[0], conf.contentTopics[0], conf.fixedServicePeer
+ wakuNode, conf.getPubsubTopic(), conf.contentTopics[0], conf.fixedServicePeer
)
diff --git a/apps/liteprotocoltester/infra.env b/apps/liteprotocoltester/infra.env
index 6d4542eca..ebf614732 100644
--- a/apps/liteprotocoltester/infra.env
+++ b/apps/liteprotocoltester/infra.env
@@ -4,7 +4,7 @@ NUM_MESSAGES=300
MESSAGE_INTERVAL_MILLIS=1000
MIN_MESSAGE_SIZE=15Kb
MAX_MESSAGE_SIZE=145Kb
-PUBSUB=/waku/2/rs/16/32
+SHARD=32
CONTENT_TOPIC=/tester/2/light-pubsub-test-at-infra/status-prod
CLUSTER_ID=16
LIGHTPUSH_BOOTSTRAP=enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0
diff --git a/apps/liteprotocoltester/lightpush_publisher.nim b/apps/liteprotocoltester/lightpush_publisher.nim
index 32f802fe4..d79e68590 100644
--- a/apps/liteprotocoltester/lightpush_publisher.nim
+++ b/apps/liteprotocoltester/lightpush_publisher.nim
@@ -145,13 +145,20 @@ proc publishMessages(
lightpushContentTopic,
renderMsgSize,
)
+
+ let publishStartTime = Moment.now()
+
let wlpRes = await wakuNode.legacyLightpushPublish(
some(lightpushPubsubTopic), message, actualServicePeer
)
+ let publishDuration = Moment.now() - publishStartTime
+
let msgHash = computeMessageHash(lightpushPubsubTopic, message).to0xHex
if wlpRes.isOk():
+ lpt_publish_duration_seconds.observe(publishDuration.milliseconds.float / 1000)
+
sentMessages[messagesSent] = (hash: msgHash, relayed: true)
notice "published message using lightpush",
index = messagesSent + 1,
@@ -251,7 +258,7 @@ proc setupAndPublish*(
asyncSpawn publishMessages(
wakuNode,
servicePeer,
- conf.pubsubTopics[0],
+ conf.getPubsubTopic(),
conf.contentTopics[0],
conf.numMessages,
(min: parsedMinMsgSize, max: parsedMaxMsgSize),
diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim
index c23b80e72..58f6bd2e3 100644
--- a/apps/liteprotocoltester/liteprotocoltester.nim
+++ b/apps/liteprotocoltester/liteprotocoltester.nim
@@ -99,7 +99,7 @@ when isMainModule:
wakuConf.dnsAddrs = true
wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
- wakuConf.pubsubTopics = conf.pubsubTopics
+ wakuConf.shards = @[conf.shard]
wakuConf.contentTopics = conf.contentTopics
wakuConf.clusterId = conf.clusterId
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
@@ -118,6 +118,7 @@ when isMainModule:
wakuConf.store = false
wakuConf.rest = false
+ wakuConf.relayServiceRatio = "40:60"
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
# It will always be called from main thread anyway.
@@ -126,7 +127,7 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
- let restServer = rest_server_builder.startRestServerEsentials(
+ let restServer = rest_server_builder.startRestServerEssentials(
nodeHealthMonitor, wakuConf
).valueOr:
error "Starting esential REST server failed.", error = $error
@@ -202,10 +203,8 @@ when isMainModule:
var codec = WakuLightPushCodec
# mounting relevant client, for PX filter client must be mounted ahead
if conf.testFunc == TesterFunctionality.SENDER:
- wakuApp.node.mountLegacyLightPushClient()
codec = WakuLightPushCodec
else:
- waitFor wakuApp.node.mountFilterClient()
codec = WakuFilterSubscribeCodec
var lookForServiceNode = false
diff --git a/apps/liteprotocoltester/lpt_metrics.nim b/apps/liteprotocoltester/lpt_metrics.nim
index e68164d13..8b30619de 100644
--- a/apps/liteprotocoltester/lpt_metrics.nim
+++ b/apps/liteprotocoltester/lpt_metrics.nim
@@ -47,3 +47,10 @@ declarePublicGauge lpt_px_peers,
declarePublicGauge lpt_dialed_peers, "Number of peers successfully dialed", ["agent"]
declarePublicGauge lpt_dial_failures, "Number of dial failures by cause", ["agent"]
+
+declarePublicHistogram lpt_publish_duration_seconds,
+ "duration to lightpush messages",
+ buckets = [
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0,
+ 15.0, 20.0, 30.0, Inf,
+ ]
diff --git a/apps/liteprotocoltester/run_service_node.sh b/apps/liteprotocoltester/run_service_node.sh
index 1d36292c1..07fdbe980 100755
--- a/apps/liteprotocoltester/run_service_node.sh
+++ b/apps/liteprotocoltester/run_service_node.sh
@@ -5,10 +5,10 @@ IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
echo "Service node IP: ${IP}"
-if [ -n "${PUBSUB}" ]; then
- PUBSUB=--pubsub-topic="${PUBSUB}"
+if [ -n "${SHARD}" ]; then
+ SHARD=--shard="${SHARD}"
else
- PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
+ SHARD=--shard="0"
fi
if [ -n "${CLUSTER_ID}" ]; then
@@ -59,5 +59,5 @@ exec /usr/bin/wakunode\
--metrics-server-port=8003\
--metrics-server-address=0.0.0.0\
--nat=extip:${IP}\
- ${PUBSUB}\
+ ${SHARD}\
${CLUSTER_ID}
diff --git a/apps/liteprotocoltester/run_tester_node.sh b/apps/liteprotocoltester/run_tester_node.sh
index 8975fba91..4a80ca460 100755
--- a/apps/liteprotocoltester/run_tester_node.sh
+++ b/apps/liteprotocoltester/run_tester_node.sh
@@ -93,10 +93,10 @@ else
FULL_NODE=--bootstrap-node="${SERIVCE_NODE_ADDR}"
fi
-if [ -n "${PUBSUB}" ]; then
- PUBSUB=--pubsub-topic="${PUBSUB}"
+if [ -n "${SHARD}" ]; then
+ SHARD=--shard="${SHARD}"
else
- PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
+ SHARD=--shard="0"
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@@ -128,19 +128,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
fi
+if [ -n "${LOG_LEVEL}" ]; then
+ LOG_LEVEL=--log-level=${LOG_LEVEL}
+else
+ LOG_LEVEL=--log-level=INFO
+fi
+
echo "Running binary: ${BINARY_PATH}"
echo "Tester node: ${FUNCTION}"
echo "Using service node: ${SERIVCE_NODE_ADDR}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
- --log-level=INFO\
--nat=extip:${MY_EXT_IP}\
--test-peers\
+ ${LOG_LEVEL}\
${FULL_NODE}\
${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
- ${PUBSUB}\
+ ${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\
diff --git a/apps/liteprotocoltester/run_tester_node_at_infra.sh b/apps/liteprotocoltester/run_tester_node_at_infra.sh
index 6cec4b006..e926875aa 100644
--- a/apps/liteprotocoltester/run_tester_node_at_infra.sh
+++ b/apps/liteprotocoltester/run_tester_node_at_infra.sh
@@ -48,10 +48,10 @@ fi
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
-if [ -n "${PUBSUB}" ]; then
- PUBSUB=--pubsub-topic="${PUBSUB}"
+if [ -n "${SHARD}" ]; then
+ SHARD=--shard="${SHARD}"
else
- PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
+ SHARD=--shard="0"
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@@ -83,19 +83,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
fi
+if [ -n "${LOG_LEVEL}" ]; then
+ LOG_LEVEL=--log-level=${LOG_LEVEL}
+else
+ LOG_LEVEL=--log-level=INFO
+fi
+
echo "Running binary: ${BINARY_PATH}"
echo "Node function is: ${FUNCTION}"
echo "Using service/bootstrap node as: ${NODE_ARG}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
- --log-level=INFO\
--nat=extip:${MY_EXT_IP}\
--test-peers\
+ ${LOG_LEVEL}\
${NODE_ARG}\
${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
- ${PUBSUB}\
+ ${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\
diff --git a/apps/liteprotocoltester/run_tester_node_on_fleet.sh b/apps/liteprotocoltester/run_tester_node_on_fleet.sh
index f0300cef2..538a890e6 100644
--- a/apps/liteprotocoltester/run_tester_node_on_fleet.sh
+++ b/apps/liteprotocoltester/run_tester_node_on_fleet.sh
@@ -48,10 +48,10 @@ fi
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
-if [ -n "${PUBSUB}" ]; then
- PUBSUB=--pubsub-topic="${PUBSUB}"
+if [ -n "${SHARD}" ]; then
+ SHARD=--shard=${SHARD}
else
- PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
+ SHARD=--shard=0
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@@ -79,8 +79,14 @@ if [ -n "${NUM_MESSAGES}" ]; then
NUM_MESSAGES=--num-messages="${NUM_MESSAGES}"
fi
-if [ -n "${DELAY_MESSAGES}" ]; then
- DELAY_MESSAGES=--delay-messages="${DELAY_MESSAGES}"
+if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
+ MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
+fi
+
+if [ -n "${LOG_LEVEL}" ]; then
+ LOG_LEVEL=--log-level=${LOG_LEVEL}
+else
+ LOG_LEVEL=--log-level=INFO
fi
echo "Running binary: ${BINARY_PATH}"
@@ -89,12 +95,12 @@ echo "Using service/bootstrap node as: ${NODE_ARG}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
- --log-level=INFO\
--nat=extip:${MY_EXT_IP}\
+ ${LOG_LEVEL}\
${NODE_ARG}\
- ${DELAY_MESSAGES}\
+ ${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
- ${PUBSUB}\
+ ${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\
diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim
index 8fd6de973..a303c3c58 100644
--- a/apps/liteprotocoltester/service_peer_management.nim
+++ b/apps/liteprotocoltester/service_peer_management.nim
@@ -61,7 +61,7 @@ proc selectRandomCapablePeer*(
elif codec.contains("filter"):
cap = Capabilities.Filter
- var supportivePeers = pm.wakuPeerStore.getPeersByCapability(cap)
+ var supportivePeers = pm.switch.peerStore.getPeersByCapability(cap)
trace "Found supportive peers count", count = supportivePeers.len()
trace "Found supportive peers", supportivePeers = $supportivePeers
@@ -102,7 +102,7 @@ proc tryCallAllPxPeers*(
elif codec.contains("filter"):
capability = Capabilities.Filter
- var supportivePeers = pm.wakuPeerStore.getPeersByCapability(capability)
+ var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)
lpt_px_peers.set(supportivePeers.len)
debug "Found supportive peers count", count = supportivePeers.len()
@@ -189,14 +189,14 @@ proc pxLookupServiceNode*(
if conf.testPeers:
let peersOpt =
- await tryCallAllPxPeers(node.peerManager, codec, conf.pubsubTopics[0])
+ await tryCallAllPxPeers(node.peerManager, codec, conf.getPubsubTopic())
if peersOpt.isSome():
info "Found service peers for codec",
codec = codec, peer_count = peersOpt.get().len()
return ok(peersOpt.get().len > 0)
else:
let peerOpt =
- await selectRandomCapablePeer(node.peerManager, codec, conf.pubsubTopics[0])
+ await selectRandomCapablePeer(node.peerManager, codec, conf.getPubsubTopic())
if peerOpt.isSome():
info "Found service peer for codec", codec = codec, peer = peerOpt.get()
return ok(true)
@@ -215,7 +215,7 @@ proc selectRandomServicePeer*(
if actualPeer.isSome():
alreadyUsedServicePeers.add(actualPeer.get())
- let supportivePeers = pm.wakuPeerStore.getPeersByProtocol(codec).filterIt(
+ let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt(
it notin alreadyUsedServicePeers
)
if supportivePeers.len == 0:
diff --git a/apps/liteprotocoltester/tester_config.nim b/apps/liteprotocoltester/tester_config.nim
index 115686be3..eccaafc06 100644
--- a/apps/liteprotocoltester/tester_config.nim
+++ b/apps/liteprotocoltester/tester_config.nim
@@ -18,6 +18,7 @@ import
common/logging,
factory/external_config,
waku_core,
+ waku_core/topics/pubsub_topic,
]
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
@@ -95,18 +96,9 @@ type LiteProtocolTesterConf* = object
name: "message-interval"
.}: uint32
- pubsubTopics* {.
- desc: "Default pubsub topic to subscribe to. Argument may be repeated.",
- defaultValue: @[LitePubsubTopic],
- name: "pubsub-topic"
- .}: seq[PubsubTopic]
+ shard* {.desc: "Shards index to subscribe to. ", defaultValue: 0, name: "shard".}:
+ uint16
- ## TODO: extend lite protocol tester configuration based on testing needs
- # shards* {.
- # desc: "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
- # defaultValue: @[],
- # name: "shard"
- # .}: seq[uint16]
contentTopics* {.
desc: "Default content topic to subscribe to. Argument may be repeated.",
defaultValue: @[LiteContentTopic],
@@ -195,4 +187,7 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] =
except CatchableError:
err(getCurrentExceptionMsg())
+proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic =
+ return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard)
+
{.pop.}
diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim
index 2861c85ae..7b71a630e 100644
--- a/apps/networkmonitor/networkmonitor.nim
+++ b/apps/networkmonitor/networkmonitor.nim
@@ -462,7 +462,7 @@ proc initAndStartApp(
nodeBuilder.withNodeKey(key)
nodeBuilder.withRecord(record)
- nodeBUilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
+ nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
nodeBuilder.withPeerManagerConfig(
maxConnections = MaxConnectedPeers,
@@ -554,7 +554,9 @@ proc subscribeAndHandleMessages(
else:
msgPerContentTopic[msg.contentTopic] = 1
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler)))
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
+ error "failed to subscribe to pubsub topic", pubsubTopic, error
+ quit(1)
when isMainModule:
# known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
@@ -619,7 +621,10 @@ when isMainModule:
let (node, discv5) = nodeRes.get()
- waitFor node.mountRelay()
+ (waitFor node.mountRelay()).isOkOr:
+ error "failed to mount waku relay protocol: ", err = error
+ quit 1
+
waitFor node.mountLibp2pPing()
var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
@@ -630,14 +635,13 @@ when isMainModule:
if conf.rlnRelay and conf.rlnRelayEthContractAddress != "":
let rlnConf = WakuRlnConfig(
- rlnRelayDynamic: conf.rlnRelayDynamic,
- rlnRelayCredIndex: some(uint(0)),
- rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
- rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
- rlnRelayCredPath: "",
- rlnRelayCredPassword: "",
- rlnRelayTreePath: conf.rlnRelayTreePath,
- rlnEpochSizeSec: conf.rlnEpochSizeSec,
+ dynamic: conf.rlnRelayDynamic,
+ credIndex: some(uint(0)),
+ ethContractAddress: conf.rlnRelayEthContractAddress,
+ ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
+ treePath: conf.rlnRelayTreePath,
+ epochSizeSec: conf.rlnEpochSizeSec,
+ creds: none(RlnRelayCreds),
onFatalErrorAction: onFatalErrorAction,
)
diff --git a/apps/networkmonitor/networkmonitor_config.nim b/apps/networkmonitor/networkmonitor_config.nim
index bf1662649..04245f9dd 100644
--- a/apps/networkmonitor/networkmonitor_config.nim
+++ b/apps/networkmonitor/networkmonitor_config.nim
@@ -8,7 +8,7 @@ import
stew/shims/net,
regex
-type EthRpcUrl = distinct string
+type EthRpcUrl* = distinct string
type NetworkMonitorConf* = object
logLevel* {.
@@ -82,11 +82,12 @@ type NetworkMonitorConf* = object
name: "rln-relay-tree-path"
.}: string
- rlnRelayEthClientAddress* {.
- desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
- defaultValue: "http://localhost:8540/",
+ ethClientUrls* {.
+ desc:
+ "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
+ defaultValue: newSeq[EthRpcUrl](0),
name: "rln-relay-eth-client-address"
- .}: EthRpcUrl
+ .}: seq[EthRpcUrl]
rlnRelayEthContractAddress* {.
desc: "Address of membership contract on an Ethereum testnet",
diff --git a/apps/sonda/docker-compose.yml b/apps/sonda/docker-compose.yml
index 2141bbfc8..c6235ef32 100644
--- a/apps/sonda/docker-compose.yml
+++ b/apps/sonda/docker-compose.yml
@@ -9,7 +9,7 @@ x-logging: &logging
x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-"
x-rln-environment: &rln_env
- RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3}
+ RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8}
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
diff --git a/apps/sonda/register_rln.sh b/apps/sonda/register_rln.sh
index ab660f1d8..aca1007a8 100755
--- a/apps/sonda/register_rln.sh
+++ b/apps/sonda/register_rln.sh
@@ -24,7 +24,7 @@ fi
docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \
--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \
--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \
---rln-relay-eth-contract-address=0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3 \
+--rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \
--rln-relay-cred-path=/keystore/keystore.json \
--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \
--rln-relay-user-message-limit=20 \
diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim
index 914d76e70..3565c619f 100644
--- a/apps/wakucanary/wakucanary.nim
+++ b/apps/wakucanary/wakucanary.nim
@@ -1,5 +1,5 @@
import
- std/[strutils, sequtils, tables],
+ std/[strutils, sequtils, tables, strformat],
confutils,
chronos,
stew/shims/net,
@@ -21,6 +21,14 @@ const ProtocolsTable = {
"relay": "/vac/waku/relay/",
"lightpush": "/vac/waku/lightpush/",
"filter": "/vac/waku/filter-subscribe/2",
+ "filter-push": "/vac/waku/filter-push/",
+ "ipfs-id": "/ipfs/id/",
+ "autonat": "/libp2p/autonat/",
+ "circuit-relay": "/libp2p/circuit/relay/",
+ "metadata": "/vac/waku/metadata/",
+ "rendezvous": "/rendezvous/",
+ "ipfs-ping": "/ipfs/ping/",
+ "peer-exchange": "/vac/waku/peer-exchange/",
}.toTable
const WebSocketPortOffset = 1000
@@ -105,21 +113,30 @@ proc parseCmdArg*(T: type chronos.Duration, p: string): T =
proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] =
return @[]
-# checks if rawProtocols (skipping version) are supported in nodeProtocols
proc areProtocolsSupported(
- rawProtocols: seq[string], nodeProtocols: seq[string]
+ toValidateProtocols: seq[string], nodeProtocols: seq[string]
): bool =
+ ## Checks if all toValidateProtocols are contained in nodeProtocols.
+ ## nodeProtocols contains the full list of protocols currently informed by the node under analysis.
+ ## toValidateProtocols contains the protocols, without version number, that we want to check if they are supported by the node.
var numOfSupportedProt: int = 0
- for nodeProtocol in nodeProtocols:
- for rawProtocol in rawProtocols:
- let protocolTag = ProtocolsTable[rawProtocol]
+ for rawProtocol in toValidateProtocols:
+ let protocolTag = ProtocolsTable[rawProtocol]
+ debug "Checking if protocol is supported", expected_protocol_tag = protocolTag
+
+ var protocolSupported = false
+ for nodeProtocol in nodeProtocols:
if nodeProtocol.startsWith(protocolTag):
- info "Supported protocol ok", expected = protocolTag, supported = nodeProtocol
+ info "The node supports the protocol", supported_protocol = nodeProtocol
numOfSupportedProt += 1
+ protocolSupported = true
break
- if numOfSupportedProt == rawProtocols.len:
+ if not protocolSupported:
+ error "The node does not support the protocol", expected_protocol = protocolTag
+
+ if numOfSupportedProt == toValidateProtocols.len:
return true
return false
@@ -167,7 +184,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let peerRes = parsePeerInfo(conf.address)
if peerRes.isErr():
error "Couldn't parse 'conf.address'", error = peerRes.error
- return 1
+ quit(QuitFailure)
let peer = peerRes.value
@@ -195,13 +212,19 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let netConfig = NetConfig.init(
bindIp = bindIp,
bindPort = nodeTcpPort,
- wsBindPort = wsBindPort,
+ wsBindPort = some(wsBindPort),
wsEnabled = isWs,
wssEnabled = isWss,
)
var enrBuilder = EnrBuilder.init(nodeKey)
+ enrBuilder.withWakuRelaySharding(
+ RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
+ ).isOkOr:
+ error "could not initialize ENR with shards", error
+ quit(QuitFailure)
+
let recordRes = enrBuilder.build()
let record =
if recordRes.isErr():
@@ -217,7 +240,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
createDir(CertsDirectory)
if generateSelfSignedCertificate(certPath, keyPath) != 0:
error "Error generating key and certificate"
- return 1
+ quit(QuitFailure)
builder.withRecord(record)
builder.withNetworkConfiguration(netConfig.tryGet())
@@ -232,7 +255,11 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
await mountLibp2pPing(node)
except CatchableError:
error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()
- return 1
+ quit(QuitFailure)
+
+ node.mountMetadata(conf.clusterId).isOkOr:
+ error "failed to mount metadata protocol", error
+ quit(QuitFailure)
await node.start()
@@ -243,23 +270,24 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let timedOut = not await node.connectToNodes(@[peer]).withTimeout(conf.timeout)
if timedOut:
error "Timedout after", timeout = conf.timeout
- return 1
+ quit(QuitFailure)
let lp2pPeerStore = node.switch.peerStore
- let conStatus = node.peerManager.wakuPeerStore[ConnectionBook][peer.peerId]
+ let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId]
if conf.ping:
discard await pingFut
if conStatus in [Connected, CanConnect]:
let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId]
+
if not areProtocolsSupported(conf.protocols, nodeProtocols):
error "Not all protocols are supported",
expected = conf.protocols, supported = nodeProtocols
- return 1
+ quit(QuitFailure)
elif conStatus == CannotConnect:
error "Could not connect", peerId = peer.peerId
- return 1
+ quit(QuitFailure)
return 0
when isMainModule:
diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim
index b6e94c747..a99cfcb52 100644
--- a/apps/wakunode2/wakunode2.nim
+++ b/apps/wakunode2/wakunode2.nim
@@ -38,17 +38,19 @@ when isMainModule:
const versionString = "version / git commit hash: " & waku.git_version
- var conf = WakuNodeConf.load(version = versionString).valueOr:
+ var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr:
error "failure while loading the configuration", error = error
quit(QuitFailure)
- ## Also called within Waku.new. The call to startRestServerEsentials needs the following line
- logging.setupLog(conf.logLevel, conf.logFormat)
+ ## Also called within Waku.new. The call to startRestServerEssentials needs the following line
+ logging.setupLog(wakuNodeConf.logLevel, wakuNodeConf.logFormat)
- case conf.cmd
+ case wakuNodeConf.cmd
of generateRlnKeystore:
+ let conf = wakuNodeConf.toKeystoreGeneratorConf()
doRlnKeystoreGenerator(conf)
of inspectRlnDb:
+ let conf = wakuNodeConf.toInspectRlnDbConf()
doInspectRlnDb(conf)
of noCommand:
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
@@ -58,15 +60,20 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
- var confCopy = conf
-
- let restServer = rest_server_builder.startRestServerEsentials(
- nodeHealthMonitor, confCopy
- ).valueOr:
- error "Starting esential REST server failed.", error = $error
+ let conf = wakuNodeConf.toWakuConf().valueOr:
+ error "Waku configuration failed", error = error
quit(QuitFailure)
- var waku = Waku.new(confCopy).valueOr:
+ var restServer: WakuRestServerRef = nil
+
+ if conf.restServerConf.isSome():
+ restServer = rest_server_builder.startRestServerEssentials(
+ nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift
+ ).valueOr:
+ error "Starting essential REST server failed.", error = $error
+ quit(QuitFailure)
+
+ var waku = Waku.new(conf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@@ -78,15 +85,27 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)
- rest_server_builder.startRestServerProtocolSupport(
- restServer, waku.node, waku.wakuDiscv5, confCopy
- ).isOkOr:
- error "Starting protocols support REST server failed.", error = $error
- quit(QuitFailure)
+ if conf.restServerConf.isSome():
+ rest_server_builder.startRestServerProtocolSupport(
+ restServer,
+ waku.node,
+ waku.wakuDiscv5,
+ conf.restServerConf.get(),
+ conf.relay,
+ conf.lightPush,
+ conf.clusterId,
+ conf.shards,
+ conf.contentTopics,
+ ).isOkOr:
+ error "Starting protocols support REST server failed.", error = $error
+ quit(QuitFailure)
- waku.metricsServer = waku_metrics.startMetricsServerAndLogging(confCopy).valueOr:
- error "Starting monitoring and external interfaces failed", error = error
- quit(QuitFailure)
+ if conf.metricsServerConf.isSome():
+ waku.metricsServer = waku_metrics.startMetricsServerAndLogging(
+ conf.metricsServerConf.get(), conf.portsShift
+ ).valueOr:
+ error "Starting monitoring and external interfaces failed", error = error
+ quit(QuitFailure)
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
diff --git a/ci/Jenkinsfile.release b/ci/Jenkinsfile.release
index fcc353be8..1a2125402 100644
--- a/ci/Jenkinsfile.release
+++ b/ci/Jenkinsfile.release
@@ -78,7 +78,7 @@ pipeline {
"--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
"--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
"--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
- "--target=${params.HEAPTRACK ? "prod-with-heaptrack" : "prod"} ."
+ "--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ."
)
} }
}
diff --git a/examples/cbindings/README.md b/examples/cbindings/README.md
new file mode 100644
index 000000000..5465cf512
--- /dev/null
+++ b/examples/cbindings/README.md
@@ -0,0 +1,18 @@
+## App description
+This is a very simple example that shows how to invoke libwaku functions from a C program.
+
+## Build
+1. Open terminal
+2. cd to nwaku root folder
+3. make cwaku_example -j8
+
+This will create libwaku.so and cwaku_example binary within the build folder.
+
+## Run
+1. Open terminal
+2. cd to nwaku root folder
+3. export LD_LIBRARY_PATH=build
+4. `./build/cwaku_example --host=0.0.0.0 --port=60001`
+
+Use `./build/cwaku_example --help` to see some other options.
+
diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c
index bbb76c862..b80b9af8f 100644
--- a/examples/cbindings/waku_example.c
+++ b/examples/cbindings/waku_example.c
@@ -14,7 +14,6 @@
#include "base64.h"
#include "../../library/libwaku.h"
-
// Shared synchronization variables
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
@@ -29,7 +28,6 @@ void waitForCallback() {
pthread_mutex_unlock(&mutex);
}
-
#define WAKU_CALL(call) \
do { \
int ret = call; \
@@ -107,6 +105,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return 0;
}
+void signal_cond() {
+ pthread_mutex_lock(&mutex);
+ callback_executed = 1;
+ pthread_cond_signal(&cond);
+ pthread_mutex_unlock(&mutex);
+}
+
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
@@ -118,10 +123,7 @@ void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
printf("Receiving event: %s\n", msg);
}
- pthread_mutex_lock(&mutex);
- callback_executed = 1;
- pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
+ signal_cond();
}
void on_event_received(int callerRet, const char* msg, size_t len, void* userData) {
@@ -142,6 +144,7 @@ void handle_content_topic(int callerRet, const char* msg, size_t len, void* user
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
+ signal_cond();
}
char* publishResponse = NULL;
@@ -158,33 +161,30 @@ void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userDat
#define MAX_MSG_SIZE 65535
-void publish_message(char* pubsubTopic, const char* msg) {
+void publish_message(const char* msg) {
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
- WAKU_CALL( waku_content_topic(RET_OK,
+ WAKU_CALL( waku_content_topic(ctx,
"appName",
1,
"contentTopicName",
"encoding",
handle_content_topic,
userData) );
-
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
- "{\"payload\":\"%s\",\"content_topic\":\"%s\"}",
+ "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
free(msgPayload);
- WAKU_CALL( waku_relay_publish(&ctx,
- pubsubTopic,
+ WAKU_CALL( waku_relay_publish(ctx,
+ "/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
event_handler,
userData) );
-
- printf("waku relay response [%s]\n", publishResponse);
}
void show_help_and_exit() {
@@ -194,20 +194,12 @@ void show_help_and_exit() {
void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) {
printf("Default pubsub topic: %s\n", msg);
-
- pthread_mutex_lock(&mutex);
- callback_executed = 1;
- pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
+ signal_cond();
}
void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) {
printf("Git Version: %s\n", msg);
-
- pthread_mutex_lock(&mutex);
- callback_executed = 1;
- pthread_cond_signal(&cond);
- pthread_mutex_unlock(&mutex);
+ signal_cond();
}
// Beginning of UI program logic
@@ -236,9 +228,6 @@ void handle_user_input() {
return;
}
- int c;
- while ( (c = getchar()) != '\n' && c != EOF ) { }
-
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
@@ -247,7 +236,7 @@ void handle_user_input() {
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
- WAKU_CALL( waku_relay_subscribe(&ctx,
+ WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
event_handler,
userData) );
@@ -262,21 +251,17 @@ void handle_user_input() {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
- WAKU_CALL(waku_connect(&ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
+ WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
break;
case PUBLISH_MESSAGE_MENU:
{
- printf("Indicate the Pubsubtopic:\n");
- char pubsubTopic[128];
- scanf("%127s", pubsubTopic);
-
- printf("Type the message tp publish:\n");
+ printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
- publish_message(pubsubTopic, msg);
+ publish_message(msg);
show_main_menu();
}
@@ -311,24 +296,24 @@ int main(int argc, char** argv) {
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
+ \"clusterId\": 16, \
+ \"shards\": [ 1, 32, 64, 128, 256 ], \
\"listenAddress\": \"%s\", \
\"tcpPort\": %d, \
- \"nodekey\": \"%s\", \
\"relay\": %s, \
\"store\": %s, \
\"storeMessageDbUrl\": \"%s\", \
\"storeMessageRetentionPolicy\": \"%s\", \
\"storeMaxNumDbConnections\": %d , \
- \"logLevel\": \"DEBUG\", \
+ \"logLevel\": \"FATAL\", \
\"discv5Discovery\": true, \
\"discv5BootstrapNodes\": \
[\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
\"discv5UdpPort\": 9999, \
- \"dnsDiscoveryUrl\": \"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im\", \
+ \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port,
- cfgNode.key,
cfgNode.relay ? "true":"false",
cfgNode.store ? "true":"false",
cfgNode.storeDbUrl,
@@ -351,14 +336,6 @@ int main(int argc, char** argv) {
WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) );
- printf("Establishing connection with: %s\n", cfgNode.peers);
-
- WAKU_CALL( waku_connect(ctx,
- cfgNode.peers,
- 10000 /* timeoutMs */,
- event_handler,
- userData) );
-
WAKU_CALL( waku_relay_subscribe(ctx,
"/waku/2/rs/0/0",
event_handler,
diff --git a/examples/cpp/README.md b/examples/cpp/README.md
new file mode 100644
index 000000000..fa8d246e0
--- /dev/null
+++ b/examples/cpp/README.md
@@ -0,0 +1,18 @@
+## App description
+This is a very simple example that shows how to invoke libwaku functions from a C++ program.
+
+## Build
+1. Open terminal
+2. cd to nwaku root folder
+3. make cppwaku_example -j8
+
+This will create libwaku.so and cppwaku_example binary within the build folder.
+
+## Run
+1. Open terminal
+2. cd to nwaku root folder
+3. export LD_LIBRARY_PATH=build
+4. `./build/cppwaku_example --host=0.0.0.0 --port=60001`
+
+Use `./build/cppwaku_example --help` to see some other options.
+
diff --git a/examples/cpp/waku.cpp b/examples/cpp/waku.cpp
index 4b601c492..c47877d02 100644
--- a/examples/cpp/waku.cpp
+++ b/examples/cpp/waku.cpp
@@ -16,12 +16,34 @@
#include "base64.h"
#include "../../library/libwaku.h"
+// Shared synchronization variables
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+int callback_executed = 0;
+
+void waitForCallback() {
+ pthread_mutex_lock(&mutex);
+ while (!callback_executed) {
+ pthread_cond_wait(&cond, &mutex);
+ }
+ callback_executed = 0;
+ pthread_mutex_unlock(&mutex);
+}
+
+void signal_cond() {
+ pthread_mutex_lock(&mutex);
+ callback_executed = 1;
+ pthread_cond_signal(&cond);
+ pthread_mutex_unlock(&mutex);
+}
+
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
+ waitForCallback(); \
} while (0)
struct ConfigNode {
@@ -78,6 +100,24 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return 0;
}
+void event_handler(const char* msg, size_t len) {
+ printf("Receiving event: %s\n", msg);
+}
+
+void handle_error(const char* msg, size_t len) {
+ printf("handle_error: %s\n", msg);
+ exit(1);
+}
+
+template <typename F>
+auto cify(F&& f) {
+ static F fn = std::forward<F>(f);
+ return [](int callerRet, const char* msg, size_t len, void* userData) {
+ signal_cond();
+ return fn(msg, len);
+ };
+}
+
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
// Beginning of UI program logic
@@ -98,7 +138,7 @@ void show_main_menu() {
printf("\t3.) Publish a message\n");
}
-void handle_user_input() {
+void handle_user_input(void* ctx) {
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
@@ -106,9 +146,6 @@ void handle_user_input() {
return;
}
- int c;
- while ( (c = getchar()) != '\n' && c != EOF ) { }
-
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
@@ -116,10 +153,14 @@ void handle_user_input() {
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
- // if (!waku_relay_subscribe(pubsubTopic, &mResp)) {
- // printf("Error subscribing to PubsubTopic: %s\n", mResp->data);
- // }
- // printf("Waku Relay subscription response: %s\n", mResp->data);
+
+ WAKU_CALL( waku_relay_subscribe(ctx,
+ pubsubTopic,
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr) );
+ printf("The subscription went well\n");
show_main_menu();
}
@@ -130,41 +171,51 @@ void handle_user_input() {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
- // if (!waku_connect(peerAddr, 10000 /* timeoutMs */, &mResp)) {
- // printf("Couldn't connect to the remote peer: %s\n", mResp->data);
- // }
+ WAKU_CALL( waku_connect(ctx,
+ peerAddr,
+ 10000 /* timeoutMs */,
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr));
show_main_menu();
break;
case PUBLISH_MESSAGE_MENU:
{
- printf("Indicate the Pubsubtopic:\n");
- char pubsubTopic[128];
- scanf("%127s", pubsubTopic);
-
- printf("Type the message tp publish:\n");
+ printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
- char jsonWakuMsg[1024];
+ char jsonWakuMsg[2048];
std::vector<uint8_t> msgPayload;
b64_encode(msg, strlen(msg), msgPayload);
- // waku_content_topic("appName",
- // 1,
- // "contentTopicName",
- // "encoding",
- // &mResp);
+ std::string contentTopic;
+ waku_content_topic(ctx,
+ "appName",
+ 1,
+ "contentTopicName",
+ "encoding",
+ cify([&contentTopic](const char* msg, size_t len) {
+ contentTopic = msg;
+ }),
+ nullptr);
- // snprintf(jsonWakuMsg,
- // 1024,
- // "{\"payload\":\"%s\",\"content_topic\":\"%s\"}",
- // msgPayload, mResp->data);
+ snprintf(jsonWakuMsg,
+ 2048,
+ "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
+ msgPayload.data(), contentTopic.c_str());
- // free(msgPayload);
+ WAKU_CALL( waku_relay_publish(ctx,
+ "/waku/2/rs/16/32",
+ jsonWakuMsg,
+ 10000 /*timeout ms*/,
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr) );
- // waku_relay_publish(pubsubTopic, jsonWakuMsg, 10000 /*timeout ms*/, &mResp);
- // printf("waku relay response [%s]\n", mResp->data);
show_main_menu();
}
break;
@@ -181,23 +232,6 @@ void show_help_and_exit() {
exit(1);
}
-void event_handler(const char* msg, size_t len) {
- printf("Receiving message %s\n", msg);
-}
-
-void handle_error(const char* msg, size_t len) {
- printf("Error: %s\n", msg);
- exit(1);
-}
-
-template <typename F>
-auto cify(F&& f) {
- static F fn = std::forward<F>(f);
- return [](const char* msg, size_t len) {
- return fn(msg, len);
- };
-}
-
int main(int argc, char** argv) {
struct ConfigNode cfgNode;
// default values
@@ -212,60 +246,86 @@ int main(int argc, char** argv) {
show_help_and_exit();
}
- char jsonConfig[1024];
- snprintf(jsonConfig, 1024, "{ \
+ char jsonConfig[2048];
+ snprintf(jsonConfig, 2048, "{ \
\"host\": \"%s\", \
\"port\": %d, \
- \"key\": \"%s\", \
- \"relay\": %s, \
- \"logLevel\": \"DEBUG\" \
+ \"relay\": true, \
+ \"clusterId\": 16, \
+ \"shards\": [ 1, 32, 64, 128, 256 ], \
+ \"logLevel\": \"FATAL\", \
+ \"discv5Discovery\": true, \
+ \"discv5BootstrapNodes\": \
+ [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
+ \"discv5UdpPort\": 9999, \
+ \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
+ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
- cfgNode.port,
- cfgNode.key,
- cfgNode.relay ? "true":"false");
+ cfgNode.port);
- WAKU_CALL(waku_new(jsonConfig, cify([](const char* msg, size_t len) {
- std::cout << "Error: " << msg << std::endl;
- exit(1);
- })));
+ void* ctx =
+ waku_new(jsonConfig,
+ cify([](const char* msg, size_t len) {
+ std::cout << "waku_new feedback: " << msg << std::endl;
+ }
+ ),
+ nullptr
+ );
+ waitForCallback();
// example on how to retrieve a value from the `libwaku` callback.
std::string defaultPubsubTopic;
- WAKU_CALL(waku_default_pubsub_topic(cify([&defaultPubsubTopic](const char* msg, size_t len) {
- defaultPubsubTopic = msg;
- })));
+ WAKU_CALL(
+ waku_default_pubsub_topic(
+ ctx,
+ cify([&defaultPubsubTopic](const char* msg, size_t len) {
+ defaultPubsubTopic = msg;
+ }
+ ),
+ nullptr));
std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl;
- WAKU_CALL(waku_version(cify([&](const char* msg, size_t len) {
- std::cout << "Git Version: " << msg << std::endl;
- })));
+ WAKU_CALL(waku_version(ctx,
+ cify([&](const char* msg, size_t len) {
+ std::cout << "Git Version: " << msg << std::endl;
+ }),
+ nullptr));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
std::string pubsubTopic;
- WAKU_CALL(waku_pubsub_topic("example", cify([&](const char* msg, size_t len) {
- pubsubTopic = msg;
- })));
+ WAKU_CALL(waku_pubsub_topic(ctx,
+ "example",
+ cify([&](const char* msg, size_t len) {
+ pubsubTopic = msg;
+ }),
+ nullptr));
std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl;
- waku_set_event_callback(event_handler);
- waku_start();
+ waku_set_event_callback(ctx,
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr);
- WAKU_CALL( waku_connect(cfgNode.peers,
- 10000 /* timeoutMs */,
- handle_error) );
+ WAKU_CALL( waku_start(ctx,
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr));
- WAKU_CALL( waku_relay_subscribe(defaultPubsubTopic.c_str(),
- handle_error) );
-
- std::cout << "Establishing connection with: " << cfgNode.peers << std::endl;
- WAKU_CALL(waku_connect(cfgNode.peers, 10000 /* timeoutMs */, handle_error));
+ WAKU_CALL( waku_relay_subscribe(ctx,
+ defaultPubsubTopic.c_str(),
+ cify([&](const char* msg, size_t len) {
+ event_handler(msg, len);
+ }),
+ nullptr) );
show_main_menu();
while(1) {
- handle_user_input();
+ handle_user_input(ctx);
}
}
diff --git a/examples/publisher.nim b/examples/publisher.nim
index 654f40601..907ce2274 100644
--- a/examples/publisher.nim
+++ b/examples/publisher.nim
@@ -86,7 +86,10 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
)
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
+ quit(1)
+
node.peerManager.start()
(await wakuDiscv5.start()).isOkOr:
@@ -95,7 +98,7 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
# wait for a minimum of peers to be connected, otherwise messages wont be gossiped
while true:
- let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book
+ let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book
.values()
.countIt(it == Connected)
if numConnectedPeers >= 6:
diff --git a/examples/subscriber.nim b/examples/subscriber.nim
index 0dd22f469..633bfa4ca 100644
--- a/examples/subscriber.nim
+++ b/examples/subscriber.nim
@@ -84,7 +84,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
)
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
+ quit(1)
node.peerManager.start()
(await wakuDiscv5.start()).isOkOr:
@@ -93,7 +95,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
# wait for a minimum of peers to be connected, otherwise messages wont be gossiped
while true:
- let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book
+ let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book
.values()
.countIt(it == Connected)
if numConnectedPeers >= 6:
@@ -118,7 +120,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
contentTopic = msg.contentTopic,
timestamp = msg.timestamp
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler)))
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
+ error "failed to subscribe to pubsub topic", pubsubTopic, error
+ quit(1)
when isMainModule:
let rng = crypto.newRng()
diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim
index dbab8a3b2..b5dafb0be 100644
--- a/examples/wakustealthcommitments/node_spec.nim
+++ b/examples/wakustealthcommitments/node_spec.nim
@@ -36,7 +36,6 @@ proc setup*(): Waku =
conf.clusterId = twnClusterConf.clusterId
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
- conf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
conf.discv5Discovery = twnClusterConf.discv5Discovery
conf.discv5BootstrapNodes =
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
diff --git a/examples/wakustealthcommitments/stealth_commitment_protocol.nim b/examples/wakustealthcommitments/stealth_commitment_protocol.nim
index c6e6d6b9c..7da6bff56 100644
--- a/examples/wakustealthcommitments/stealth_commitment_protocol.nim
+++ b/examples/wakustealthcommitments/stealth_commitment_protocol.nim
@@ -187,5 +187,7 @@ proc new*(
except CatchableError:
error "could not handle SCP message: ", err = getCurrentExceptionMsg()
- waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler))
+ waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr:
+ error "could not subscribe to pubsub topic: ", err = $error
+ return err("could not subscribe to pubsub topic: " & $error)
return ok(SCP)
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 000000000..359ae2579
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,49 @@
+{
+ "nodes": {
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1740603184,
+ "narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "nixpkgs": "nixpkgs",
+ "zerokit": "zerokit"
+ }
+ },
+ "zerokit": {
+ "inputs": {
+ "nixpkgs": [
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1743756626,
+ "narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=",
+ "owner": "vacp2p",
+ "repo": "zerokit",
+ "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
+ "type": "github"
+ },
+ "original": {
+ "owner": "vacp2p",
+ "repo": "zerokit",
+ "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 000000000..760f49337
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,64 @@
+{
+ description = "NWaku build flake";
+
+ nixConfig = {
+ extra-substituters = [ "https://nix-cache.status.im/" ];
+ extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
+ };
+
+ inputs = {
+ nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
+ zerokit = {
+ url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582";
+ inputs.nixpkgs.follows = "nixpkgs";
+ };
+ };
+
+ outputs = { self, nixpkgs, zerokit }:
+ let
+ stableSystems = [
+ "x86_64-linux" "aarch64-linux"
+ "x86_64-darwin" "aarch64-darwin"
+ "x86_64-windows" "i686-linux"
+ "i686-windows"
+ ];
+
+ forAllSystems = f: nixpkgs.lib.genAttrs stableSystems (system: f system);
+
+ pkgsFor = forAllSystems (
+ system: import nixpkgs {
+ inherit system;
+ config = {
+ android_sdk.accept_license = true;
+ allowUnfree = true;
+ };
+ overlays = [
+ (final: prev: {
+ androidEnvCustom = prev.callPackage ./nix/pkgs/android-sdk { };
+ androidPkgs = final.androidEnvCustom.pkgs;
+ androidShell = final.androidEnvCustom.shell;
+ })
+ ];
+ }
+ );
+
+ in rec {
+ packages = forAllSystems (system: let
+ pkgs = pkgsFor.${system};
+ in rec {
+ libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix {
+ inherit stableSystems;
+ src = self;
+ targets = ["libwaku-android-arm64"];
+ androidArch = "aarch64-linux-android";
+ abidir = "arm64-v8a";
+ zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64;
+ };
+ default = libwaku-android-arm64;
+ });
+
+ devShells = forAllSystems (system: {
+ default = pkgsFor.${system}.callPackage ./nix/shell.nix {};
+ });
+ };
+}
\ No newline at end of file
diff --git a/library/events/json_connection_change_event.nim b/library/events/json_connection_change_event.nim
index 1a00237b6..ff2823640 100644
--- a/library/events/json_connection_change_event.nim
+++ b/library/events/json_connection_change_event.nim
@@ -9,9 +9,6 @@ type JsonConnectionChangeEvent* = ref object of JsonEvent
proc new*(
T: type JsonConnectionChangeEvent, peerId: string, peerEvent: PeerEventKind
): T =
- # Returns a JsonConnectionChangeEvent event as indicated in
- # https://rfc.vac.dev/spec/36/#jsonmessageevent-type
-
return JsonConnectionChangeEvent(
eventType: "connection_change", peerId: peerId, peerEvent: peerEvent
)
diff --git a/library/events/json_message_event.nim b/library/events/json_message_event.nim
index 6f9dafa9f..f79fef86f 100644
--- a/library/events/json_message_event.nim
+++ b/library/events/json_message_event.nim
@@ -71,7 +71,7 @@ type JsonMessageEvent* = ref object of JsonEvent
proc new*(T: type JsonMessageEvent, pubSubTopic: string, msg: WakuMessage): T =
# Returns a WakuMessage event as indicated in
- # https://rfc.vac.dev/spec/36/#jsonmessageevent-type
+ # https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonmessageevent-type
var payload = newSeq[byte](len(msg.payload))
if len(msg.payload) != 0:
diff --git a/library/events/json_topic_health_change_event.nim b/library/events/json_topic_health_change_event.nim
index c735eccbf..c194e890c 100644
--- a/library/events/json_topic_health_change_event.nim
+++ b/library/events/json_topic_health_change_event.nim
@@ -10,9 +10,6 @@ type JsonTopicHealthChangeEvent* = ref object of JsonEvent
proc new*(
T: type JsonTopicHealthChangeEvent, pubsubTopic: string, topicHealth: TopicHealth
): T =
- # Returns a TopicHealthChange event as indicated in
- # https://rfc.vac.dev/spec/36/#jsonmessageevent-type
-
return JsonTopicHealthChangeEvent(
eventType: "relay_topic_health_change",
pubsubTopic: pubsubTopic,
diff --git a/library/libwaku.h b/library/libwaku.h
index bd9b6bfed..3c15b36f9 100644
--- a/library/libwaku.h
+++ b/library/libwaku.h
@@ -117,11 +117,21 @@ int waku_relay_get_num_connected_peers(void* ctx,
WakuCallBack callback,
void* userData);
+int waku_relay_get_connected_peers(void* ctx,
+ const char* pubSubTopic,
+ WakuCallBack callback,
+ void* userData);
+
int waku_relay_get_num_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
+int waku_relay_get_peers_in_mesh(void* ctx,
+ const char* pubSubTopic,
+ WakuCallBack callback,
+ void* userData);
+
int waku_store_query(void* ctx,
const char* jsonQuery,
const char* peerAddr,
@@ -158,6 +168,10 @@ int waku_get_peerids_from_peerstore(void* ctx,
WakuCallBack callback,
void* userData);
+int waku_get_connected_peers_info(void* ctx,
+ WakuCallBack callback,
+ void* userData);
+
int waku_get_peerids_by_protocol(void* ctx,
const char* protocol,
WakuCallBack callback,
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 258ac27b2..48df3e2c6 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -42,7 +42,8 @@ import
template checkLibwakuParams*(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) =
- ctx[].userData = userData
+ if not isNil(ctx):
+ ctx[].userData = userData
if isNil(callback):
return RET_MISSING_CALLBACK
@@ -52,10 +53,6 @@ template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untype
error eventName & " - eventCallback is nil"
return
- if isNil(ctx[].eventUserData):
- error eventName & " - eventUserData is nil"
- return
-
foreignThreadGc:
try:
let event = body
@@ -228,19 +225,11 @@ proc waku_content_topic(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let appStr = appName.alloc()
- let ctnStr = contentTopicName.alloc()
- let encodingStr = encoding.alloc()
-
- let contentTopic = fmt"/{$appStr}/{appVersion}/{$ctnStr}/{$encodingStr}"
+ let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}"
callback(
RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData
)
- deallocShared(appStr)
- deallocShared(ctnStr)
- deallocShared(encodingStr)
-
return RET_OK
proc waku_pubsub_topic(
@@ -251,15 +240,11 @@ proc waku_pubsub_topic(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let topicNameStr = topicName.alloc()
-
- let outPubsubTopic = fmt"/waku/2/{$topicNameStr}"
+ let outPubsubTopic = fmt"/waku/2/{$topicName}"
callback(
RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData
)
- deallocShared(topicNameStr)
-
return RET_OK
proc waku_default_pubsub_topic(
@@ -292,12 +277,9 @@ proc waku_relay_publish(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let jwm = jsonWakuMessage.alloc()
- defer:
- deallocShared(jwm)
var jsonMessage: JsonMessage
try:
- let jsonContent = parseJson($jwm)
+ let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
@@ -310,14 +292,10 @@ proc waku_relay_publish(
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(pst)
-
handleRequest(
ctx,
RequestType.RELAY,
- RelayRequest.createShared(RelayMsgType.PUBLISH, pst, nil, wakuMessage),
+ RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage),
callback,
userData,
)
@@ -357,15 +335,12 @@ proc waku_relay_subscribe(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(pst)
var cb = onReceivedMessage(ctx)
handleRequest(
ctx,
RequestType.RELAY,
- RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pst, WakuRelayHandler(cb)),
+ RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)),
callback,
userData,
)
@@ -380,9 +355,6 @@ proc waku_relay_add_protected_shard(
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let pubk = publicKey.alloc()
- defer:
- deallocShared(pubk)
handleRequest(
ctx,
@@ -391,7 +363,7 @@ proc waku_relay_add_protected_shard(
RelayMsgType.ADD_PROTECTED_SHARD,
clusterId = clusterId,
shardId = shardId,
- publicKey = pubk,
+ publicKey = publicKey,
),
callback,
userData,
@@ -406,15 +378,11 @@ proc waku_relay_unsubscribe(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(pst)
-
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
- RelayMsgType.UNSUBSCRIBE, pst, WakuRelayHandler(onReceivedMessage(ctx))
+ RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx))
),
callback,
userData,
@@ -429,14 +397,27 @@ proc waku_relay_get_num_connected_peers(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(pst)
+ handleRequest(
+ ctx,
+ RequestType.RELAY,
+ RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic),
+ callback,
+ userData,
+ )
+
+proc waku_relay_get_connected_peers(
+ ctx: ptr WakuContext,
+ pubSubTopic: cstring,
+ callback: WakuCallBack,
+ userData: pointer,
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
- RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pst),
+ RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
@@ -450,14 +431,27 @@ proc waku_relay_get_num_peers_in_mesh(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(pst)
+ handleRequest(
+ ctx,
+ RequestType.RELAY,
+ RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic),
+ callback,
+ userData,
+ )
+
+proc waku_relay_get_peers_in_mesh(
+ ctx: ptr WakuContext,
+ pubSubTopic: cstring,
+ callback: WakuCallBack,
+ userData: pointer,
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
- RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pst),
+ RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic),
callback,
userData,
)
@@ -527,15 +521,9 @@ proc waku_lightpush_publish(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- let jwm = jsonWakuMessage.alloc()
- let pst = pubSubTopic.alloc()
- defer:
- deallocShared(jwm)
- deallocShared(pst)
-
var jsonMessage: JsonMessage
try:
- let jsonContent = parseJson($jwm)
+ let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
@@ -551,7 +539,7 @@ proc waku_lightpush_publish(
handleRequest(
ctx,
RequestType.LIGHTPUSH,
- LightpushRequest.createShared(LightpushMsgType.PUBLISH, pst, wakuMessage),
+ LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage),
callback,
userData,
)
@@ -650,6 +638,20 @@ proc waku_get_peerids_from_peerstore(
userData,
)
+proc waku_get_connected_peers_info(
+ ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
+
+ handleRequest(
+ ctx,
+ RequestType.PEER_MANAGER,
+ PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO),
+ callback,
+ userData,
+ )
+
proc waku_get_connected_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
diff --git a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim
index 078a43030..4eb193728 100644
--- a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim
@@ -143,7 +143,7 @@ proc process*(
of PEER_EXCHANGE:
let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr:
error "PEER_EXCHANGE failed", error = error
- return err("error calling performPeerExchangeRequestTo: " & $error)
+ return err($error)
return ok($numValidPeers)
error "discovery request not handled"
diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
index 9bd0017ab..8d504df89 100644
--- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
@@ -72,7 +72,11 @@ proc createWaku(
appCallbacks.relayHandler = nil
appCallbacks.topicHealthChangeHandler = nil
- let wakuRes = Waku.new(conf, appCallbacks).valueOr:
+ # TODO: Convert `confJson` directly to `WakuConf`
+ let wakuConf = conf.toWakuConf().valueOr:
+ return err("Configuration error: " & $error)
+
+ let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
error "waku initialization failed", error = error
return err("Failed setting up Waku: " & $error)
@@ -88,16 +92,16 @@ proc process*(
of CREATE_NODE:
waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr:
error "CREATE_NODE failed", error = error
- return err("error processing createWaku request: " & $error)
+ return err($error)
of START_NODE:
(await waku.startWaku()).isOkOr:
error "START_NODE failed", error = error
- return err("problem starting waku: " & $error)
+ return err($error)
of STOP_NODE:
try:
await waku[].stop()
except Exception:
error "STOP_NODE failed", error = getCurrentExceptionMsg()
- return err("exception stopping node: " & getCurrentExceptionMsg())
+ return err(getCurrentExceptionMsg())
return ok("")
diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
index 73b5a320d..1e5202891 100644
--- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
@@ -1,5 +1,5 @@
import std/[sequtils, strutils]
-import chronicles, chronos, results, options
+import chronicles, chronos, results, options, json
import
../../../../waku/factory/waku,
../../../../waku/node/waku_node,
@@ -9,6 +9,7 @@ import
type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
GET_ALL_PEER_IDS
+ GET_CONNECTED_PEERS_INFO
GET_PEER_IDS_BY_PROTOCOL
DISCONNECT_PEER_BY_ID
DIAL_PEER
@@ -22,6 +23,10 @@ type PeerManagementRequest* = object
protocol: cstring
peerId: cstring
+type PeerInfo = object
+ protocols: seq[string]
+ addresses: seq[string]
+
proc createShared*(
T: type PeerManagementRequest,
op: PeerManagementMsgType,
@@ -81,11 +86,29 @@ proc process*(
of GET_ALL_PEER_IDS:
## returns a comma-separated string of peerIDs
let peerIDs =
- waku.node.peerManager.wakuPeerStore.peers().mapIt($it.peerId).join(",")
+ waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
+ of GET_CONNECTED_PEERS_INFO:
+ ## returns a JSON string mapping peerIDs to objects with protocols and addresses
+
+ var peersMap = initTable[string, PeerInfo]()
+ let peers = waku.node.peerManager.switch.peerStore.peers().filterIt(
+ it.connectedness == Connected
+ )
+
+ # Build a map of peer IDs to peer info objects
+ for peer in peers:
+ let peerIdStr = $peer.peerId
+ peersMap[peerIdStr] =
+ PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
+
+ # Convert the map to JSON string
+ let jsonObj = %*peersMap
+ let jsonStr = $jsonObj
+ return ok(jsonStr)
of GET_PEER_IDS_BY_PROTOCOL:
## returns a comma-separated string of peerIDs that mount the given protocol
- let connectedPeers = waku.node.peerManager.wakuPeerStore
+ let connectedPeers = waku.node.peerManager.switch.peerStore
.peers($self[].protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim
index e7006ad06..f167cd239 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim
@@ -104,6 +104,6 @@ proc process*(
)
).valueOr:
error "PUBLISH failed", error = error
- return err("LightpushRequest error publishing: " & $error)
+ return err($error)
return ok(msgHashHex)
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
index 232630591..c2f002c44 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
@@ -1,4 +1,4 @@
-import std/net
+import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
../../../../../waku/waku_core/message/message,
@@ -7,15 +7,19 @@ import
../../../../../waku/waku_core/message,
../../../../../waku/waku_core/time, # Timestamp
../../../../../waku/waku_core/topics/pubsub_topic,
+ ../../../../../waku/waku_core/topics,
../../../../../waku/waku_relay/protocol,
+ ../../../../../waku/node/peer_manager,
../../../../alloc
type RelayMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
PUBLISH
+ NUM_CONNECTED_PEERS
LIST_CONNECTED_PEERS
## to return the list of all connected peers to an specific pubsub topic
+ NUM_MESH_PEERS
LIST_MESH_PEERS
## to return the list of only the peers that conform the mesh for a particular pubsub topic
ADD_PROTECTED_SHARD ## Protects a shard with a public key
@@ -105,33 +109,48 @@ proc process*(
case self.operation
of SUBSCRIBE:
- # TO DO: properly perform 'subscribe'
- waku.node.registerRelayDefaultHandler($self.pubsubTopic)
- discard waku.node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback)
+ waku.node.subscribe(
+ (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
+ handler = some(self.relayEventCallback),
+ ).isOkOr:
+ error "SUBSCRIBE failed", error
+ return err($error)
of UNSUBSCRIBE:
- # TODO: properly perform 'unsubscribe'
- waku.node.wakuRelay.unsubscribeAll($self.pubsubTopic)
+ waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr:
+ error "UNSUBSCRIBE failed", error
+ return err($error)
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
(await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
- let errorMsg = "Message not sent." & $error
- error "PUBLISH failed", error = errorMsg
- return err(errorMsg)
+ error "PUBLISH failed", error
+ return err($error)
let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
return ok(msgHash)
- of LIST_CONNECTED_PEERS:
+ of NUM_CONNECTED_PEERS:
let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr:
- error "LIST_CONNECTED_PEERS failed", error = error
+ error "NUM_CONNECTED_PEERS failed", error
return err($error)
return ok($numConnPeers)
- of LIST_MESH_PEERS:
+ of LIST_CONNECTED_PEERS:
+ let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr:
+ error "LIST_CONNECTED_PEERS failed", error = error
+ return err($error)
+ ## returns a comma-separated string of peerIDs
+ return ok(connPeers.mapIt($it).join(","))
+ of NUM_MESH_PEERS:
let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr:
- error "LIST_MESH_PEERS failed", error = error
+ error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
+ of LIST_MESH_PEERS:
+ let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr:
+ error "LIST_MESH_PEERS failed", error = error
+ return err($error)
+ ## returns a comma-separated string of peerIDs
+ return ok(meshPeers.mapIt($it).join(","))
of ADD_PROTECTED_SHARD:
try:
let relayShard =
@@ -142,5 +161,5 @@ proc process*(
@[protectedShard], uint16(self.clusterId)
)
except ValueError:
- return err("ADD_PROTECTED_SHARD exception: " & getCurrentExceptionMsg())
+ return err(getCurrentExceptionMsg())
return ok("")
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim
index aa4071fcf..57786a581 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim
@@ -24,50 +24,49 @@ func fromJsonNode(
T: type StoreRequest, jsonContent: JsonNode
): Result[StoreQueryRequest, string] =
var contentTopics: seq[string]
- if jsonContent.contains("content_topics"):
+ if jsonContent.contains("contentTopics"):
contentTopics = collect(newSeq):
- for cTopic in jsonContent["content_topics"].getElems():
+ for cTopic in jsonContent["contentTopics"].getElems():
cTopic.getStr()
var msgHashes: seq[WakuMessageHash]
- if jsonContent.contains("message_hashes"):
- for hashJsonObj in jsonContent["message_hashes"].getElems():
+ if jsonContent.contains("messageHashes"):
+ for hashJsonObj in jsonContent["messageHashes"].getElems():
let hash = hashJsonObj.getStr().hexToHash().valueOr:
return err("Failed converting message hash hex string to bytes: " & error)
msgHashes.add(hash)
let pubsubTopic =
- if jsonContent.contains("pubsub_topic"):
- some(jsonContent["pubsub_topic"].getStr())
+ if jsonContent.contains("pubsubTopic"):
+ some(jsonContent["pubsubTopic"].getStr())
else:
none(string)
let paginationCursor =
- if jsonContent.contains("pagination_cursor"):
- let hash = jsonContent["pagination_cursor"].getStr().hexToHash().valueOr:
- return
- err("Failed converting pagination_cursor hex string to bytes: " & error)
+ if jsonContent.contains("paginationCursor"):
+ let hash = jsonContent["paginationCursor"].getStr().hexToHash().valueOr:
+ return err("Failed converting paginationCursor hex string to bytes: " & error)
some(hash)
else:
none(WakuMessageHash)
- let paginationForwardBool = jsonContent["pagination_forward"].getBool()
+ let paginationForwardBool = jsonContent["paginationForward"].getBool()
let paginationForward =
if paginationForwardBool: PagingDirection.FORWARD else: PagingDirection.BACKWARD
let paginationLimit =
- if jsonContent.contains("pagination_limit"):
- some(uint64(jsonContent["pagination_limit"].getInt()))
+ if jsonContent.contains("paginationLimit"):
+ some(uint64(jsonContent["paginationLimit"].getInt()))
else:
none(uint64)
- let startTime = ?jsonContent.getProtoInt64("time_start")
- let endTime = ?jsonContent.getProtoInt64("time_end")
+ let startTime = ?jsonContent.getProtoInt64("timeStart")
+ let endTime = ?jsonContent.getProtoInt64("timeEnd")
return ok(
StoreQueryRequest(
- requestId: jsonContent["request_id"].getStr(),
- includeData: jsonContent["include_data"].getBool(),
+ requestId: jsonContent["requestId"].getStr(),
+ includeData: jsonContent["includeData"].getBool(),
pubsubTopic: pubsubTopic,
contentTopics: contentTopics,
startTime: startTime,
diff --git a/metrics/waku-fleet-dashboard.json b/metrics/waku-fleet-dashboard.json
index ad9ef040a..1d8be0b1b 100644
--- a/metrics/waku-fleet-dashboard.json
+++ b/metrics/waku-fleet-dashboard.json
@@ -55,7 +55,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -139,7 +140,8 @@
"mode": "absolute",
"steps": [
{
- "color": "blue"
+ "color": "blue",
+ "value": null
}
]
},
@@ -209,7 +211,8 @@
"mode": "percentage",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -292,7 +295,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -310,7 +314,7 @@
"properties": [
{
"id": "custom.width",
- "value": 122
+ "value": 166
}
]
},
@@ -430,7 +434,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -445,7 +450,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 145
+ "y": 1074
},
"id": 81,
"options": {
@@ -526,7 +531,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -541,7 +547,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 145
+ "y": 1074
},
"id": 82,
"options": {
@@ -624,7 +630,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -640,7 +647,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 154
+ "y": 1083
},
"id": 78,
"interval": "15s",
@@ -727,7 +734,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -743,7 +751,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 154
+ "y": 1083
},
"id": 79,
"options": {
@@ -827,7 +835,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -843,7 +852,7 @@
"h": 10,
"w": 12,
"x": 0,
- "y": 192
+ "y": 1092
},
"id": 124,
"options": {
@@ -931,7 +940,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -947,7 +957,7 @@
"h": 10,
"w": 12,
"x": 12,
- "y": 192
+ "y": 1092
},
"id": 126,
"options": {
@@ -982,6 +992,204 @@
"title": "Relay traffic per shard (out) - average of all peers",
"type": "timeseries"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P6693426190CB2316"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 1102
+ },
+ "id": 169,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Mean",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.5.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "avg by (protocol)(waku_connected_peers{direction=\"In\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})",
+ "instant": false,
+ "legendFormat": "{{protocol}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "# peers per protocol (in)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P6693426190CB2316"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 1102
+ },
+ "id": 170,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Mean",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.5.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "PBFA97CFB590B2093"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "avg by (protocol)(waku_connected_peers{direction=\"Out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})",
+ "instant": false,
+ "legendFormat": "{{protocol}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "# peers per protocol (out)",
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "prometheus",
@@ -1032,7 +1240,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1048,7 +1257,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 202
+ "y": 1112
},
"id": 11,
"options": {
@@ -1131,7 +1340,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1146,7 +1356,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 202
+ "y": 1112
},
"id": 54,
"options": {
@@ -1229,7 +1439,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1244,7 +1455,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 211
+ "y": 1121
},
"id": 66,
"options": {
@@ -1325,7 +1536,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1340,7 +1552,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 211
+ "y": 1121
},
"id": 122,
"options": {
@@ -1454,7 +1666,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1463,38 +1676,13 @@
]
}
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "store-02.ac-cn-hongkong-c.status.staging:v0.34.0-rc.1"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 220
+ "y": 1130
},
"id": 68,
"options": {
@@ -1592,7 +1780,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1607,7 +1796,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 396
+ "y": 2
},
"id": 48,
"options": {
@@ -1688,7 +1877,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1703,7 +1893,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 396
+ "y": 2
},
"id": 50,
"options": {
@@ -1784,7 +1974,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1799,7 +1990,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 402
+ "y": 670
},
"id": 60,
"options": {
@@ -1905,7 +2096,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -1920,7 +2112,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 402
+ "y": 670
},
"id": 8,
"options": {
@@ -2004,7 +2196,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2019,7 +2212,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 408
+ "y": 676
},
"id": 2,
"options": {
@@ -2106,7 +2299,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2122,7 +2316,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 408
+ "y": 676
},
"id": 83,
"options": {
@@ -2205,7 +2399,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2220,7 +2415,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 414
+ "y": 682
},
"id": 3,
"options": {
@@ -2304,7 +2499,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -2319,7 +2515,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 414
+ "y": 682
},
"id": 9,
"options": {
@@ -2444,7 +2640,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 420
+ "y": 688
},
"id": 6,
"options": {
@@ -2541,7 +2737,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 420
+ "y": 688
},
"id": 7,
"options": {
@@ -2666,7 +2862,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 426
+ "y": 694
},
"id": 44,
"options": {
@@ -2788,7 +2984,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 426
+ "y": 694
},
"id": 10,
"options": {
@@ -2881,38 +3077,13 @@
},
"unit": "decbytes"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "boot-01.ac-cn-hongkong-c.status.staging seq[byte]"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 432
+ "y": 700
},
"id": 64,
"options": {
@@ -3014,7 +3185,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 434
+ "y": 702
},
"id": 4,
"options": {
@@ -3111,7 +3282,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 440
+ "y": 708
},
"id": 5,
"options": {
@@ -3207,7 +3378,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3222,7 +3394,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 447
+ "y": 3
},
"id": 159,
"options": {
@@ -3246,7 +3418,7 @@
"uid": "P6693426190CB2316"
},
"editorMode": "code",
- "expr": "waku_rln_proofs_generated_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}",
+ "expr": "waku_rln_total_generated_proofs",
"legendFormat": "{{instance}}",
"range": true,
"refId": "A"
@@ -3303,7 +3475,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3318,7 +3491,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 447
+ "y": 3
},
"id": 117,
"options": {
@@ -3400,7 +3573,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3415,7 +3589,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 447
+ "y": 3
},
"id": 160,
"options": {
@@ -3439,7 +3613,7 @@
"uid": "P6693426190CB2316"
},
"editorMode": "code",
- "expr": "waku_rln_proofs_remaining{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}",
+ "expr": "waku_rln_remaining_proofs_per_epoch",
"legendFormat": "{{instance}}",
"range": true,
"refId": "A"
@@ -3496,7 +3670,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3512,7 +3687,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 455
+ "y": 27
},
"id": 119,
"options": {
@@ -3593,7 +3768,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3608,7 +3784,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 455
+ "y": 27
},
"id": 121,
"options": {
@@ -3689,7 +3865,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3704,7 +3881,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 463
+ "y": 35
},
"id": 113,
"options": {
@@ -3786,7 +3963,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3801,7 +3979,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 463
+ "y": 35
},
"id": 115,
"options": {
@@ -3908,7 +4086,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -3923,7 +4102,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 472
+ "y": 355
},
"id": 36,
"options": {
@@ -4004,7 +4183,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4019,7 +4199,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 472
+ "y": 355
},
"id": 38,
"options": {
@@ -4115,7 +4295,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4130,7 +4311,7 @@
"h": 6,
"w": 12,
"x": 0,
- "y": 478
+ "y": 361
},
"id": 62,
"options": {
@@ -4213,7 +4394,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4228,7 +4410,7 @@
"h": 6,
"w": 12,
"x": 12,
- "y": 478
+ "y": 361
},
"id": 40,
"options": {
@@ -4329,7 +4511,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4341,7 +4524,7 @@
"h": 11,
"w": 12,
"x": 0,
- "y": 484
+ "y": 367
},
"id": 144,
"options": {
@@ -4433,7 +4616,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -4445,7 +4629,7 @@
"h": 11,
"w": 12,
"x": 12,
- "y": 484
+ "y": 367
},
"id": 145,
"options": {
@@ -4550,7 +4734,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 495
+ "y": 378
},
"id": 146,
"options": {
@@ -4654,7 +4838,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 495
+ "y": 378
},
"id": 148,
"options": {
@@ -4758,7 +4942,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 503
+ "y": 386
},
"id": 158,
"options": {
@@ -4861,7 +5045,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 503
+ "y": 386
},
"id": 157,
"options": {
@@ -4963,7 +5147,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 511
+ "y": 394
},
"id": 149,
"options": {
@@ -5086,7 +5270,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 511
+ "y": 394
},
"id": 147,
"options": {
@@ -5148,7 +5332,7 @@
"h": 7,
"w": 12,
"x": 0,
- "y": 519
+ "y": 402
},
"id": 77,
"maxDataPoints": 60,
@@ -5247,7 +5431,7 @@
"h": 7,
"w": 12,
"x": 12,
- "y": 519
+ "y": 402
},
"id": 75,
"maxDataPoints": 60,
@@ -5409,7 +5593,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 526
+ "y": 409
},
"id": 142,
"options": {
@@ -5510,7 +5694,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 526
+ "y": 409
},
"id": 130,
"options": {
@@ -5558,11 +5742,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -5571,6 +5757,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -5608,7 +5795,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 539
+ "y": 422
},
"id": 132,
"options": {
@@ -5624,10 +5811,12 @@
"sortDesc": false
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -5656,11 +5845,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -5669,6 +5860,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -5706,7 +5898,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 539
+ "y": 422
},
"id": 143,
"options": {
@@ -5722,10 +5914,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -5754,11 +5948,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -5767,6 +5963,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -5804,7 +6001,7 @@
"h": 13,
"w": 12,
"x": 0,
- "y": 552
+ "y": 435
},
"id": 128,
"options": {
@@ -5820,10 +6017,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -5852,11 +6051,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -5865,6 +6066,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -5902,7 +6104,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 552
+ "y": 435
},
"id": 141,
"options": {
@@ -5918,10 +6120,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -6006,14 +6210,10 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
},
- "unit": "binBps"
+ "unit": "deckbytes"
},
"overrides": []
},
@@ -6046,7 +6246,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "sum by(direction, protocol) (rate(total_bytes_exchanged_sum[$__rate_interval]))",
+ "expr": "sum by(direction, protocol) (total_bytes_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})",
"fullMetaSearch": false,
"includeNullMetadata": false,
"legendFormat": "__auto",
@@ -6055,7 +6255,7 @@
"useBackend": false
}
],
- "title": "Bytes Exchanged Rate",
+ "title": "Bytes Exchanged",
"type": "timeseries"
},
{
@@ -6063,7 +6263,7 @@
"type": "prometheus",
"uid": "P6693426190CB2316"
},
- "description": "the number of messages sent and received by the transfer protocol per second.",
+ "description": "the number of messages sent and received by the transfer protocol.",
"fieldConfig": {
"defaults": {
"color": {
@@ -6109,10 +6309,6 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
@@ -6147,8 +6343,8 @@
"uid": "P6693426190CB2316"
},
"disableTextWrap": false,
- "editorMode": "builder",
- "expr": "sum by(direction) (rate(total_transfer_messages_exchanged_total[$__rate_interval]))",
+ "editorMode": "code",
+ "expr": "sum by(direction) (total_transfer_messages_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})",
"fullMetaSearch": false,
"includeNullMetadata": true,
"legendFormat": "__auto",
@@ -6157,7 +6353,7 @@
"useBackend": false
}
],
- "title": "Messages Exchanged Rate",
+ "title": "Messages Exchanged",
"type": "timeseries"
},
{
@@ -6225,7 +6421,7 @@
"disableTextWrap": false,
"editorMode": "builder",
"exemplar": false,
- "expr": "sum by(le) (reconciliation_roundtrips_bucket)",
+ "expr": "sum by(le) (reconciliation_roundtrips_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -6240,13 +6436,179 @@
"title": "Distribution of Round-Trips per Reconciliation",
"type": "bargauge"
},
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P6693426190CB2316"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 13
+ },
+ "id": 171,
+ "options": {
+ "displayMode": "lcd",
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "maxVizHeight": 300,
+ "minVizHeight": 16,
+ "minVizWidth": 8,
+ "namePlacement": "left",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true,
+ "sizing": "auto",
+ "valueMode": "hidden"
+ },
+ "pluginVersion": "11.5.2",
+ "targets": [
+ {
+ "disableTextWrap": false,
+ "editorMode": "builder",
+ "expr": "sum by(le) (reconciliation_differences_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})",
+ "format": "heatmap",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Distribution of differences per reconciliation.",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "P6693426190CB2316"
+ },
+ "description": "The total number of messages cached by nodes.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 21
+ },
+ "id": 172,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.5.2",
+ "targets": [
+ {
+ "disableTextWrap": false,
+ "editorMode": "code",
+ "expr": "total_messages_cached_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}",
+ "fullMetaSearch": false,
+ "includeNullMetadata": true,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A",
+ "useBackend": false
+ }
+ ],
+ "title": "Messages Cached",
+ "type": "timeseries"
+ },
{
"collapsed": true,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 21
+ "y": 29
},
"id": 87,
"panels": [
@@ -6313,7 +6675,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 156
+ "y": 2595
},
"id": 93,
"options": {
@@ -6412,7 +6774,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 156
+ "y": 2595
},
"id": 89,
"options": {
@@ -6508,7 +6870,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 156
+ "y": 2595
},
"id": 91,
"options": {
@@ -6565,7 +6927,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 164
+ "y": 2603
},
"id": 95,
"options": {
@@ -6647,7 +7009,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 164
+ "y": 2603
},
"id": 97,
"options": {
@@ -6763,39 +7125,13 @@
},
"unit": "reqps"
},
- "overrides": [
- {
- "__systemRef": "hideSeriesFrom",
- "matcher": {
- "id": "byNames",
- "options": {
- "mode": "exclude",
- "names": [
- "boot-01.ac-cn-hongkong-c.status.prod - rejected",
- "boot-01.ac-cn-hongkong-c.status.prod - served"
- ],
- "prefix": "All except:",
- "readOnly": true
- }
- },
- "properties": [
- {
- "id": "custom.hideFrom",
- "value": {
- "legend": false,
- "tooltip": false,
- "viz": true
- }
- }
- ]
- }
- ]
+ "overrides": []
},
"gridPos": {
"h": 13,
"w": 12,
"x": 0,
- "y": 172
+ "y": 2611
},
"id": 134,
"options": {
@@ -6898,7 +7234,7 @@
"h": 13,
"w": 12,
"x": 12,
- "y": 172
+ "y": 2611
},
"id": 136,
"options": {
@@ -6944,7 +7280,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 22
+ "y": 30
},
"id": 28,
"panels": [
@@ -7011,7 +7347,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 82
+ "y": 2670
},
"id": 30,
"options": {
@@ -7107,7 +7443,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 82
+ "y": 2670
},
"id": 32,
"options": {
@@ -7205,7 +7541,7 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 129
+ "y": 2678
},
"id": 138,
"options": {
@@ -7308,7 +7644,7 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 129
+ "y": 2678
},
"id": 140,
"options": {
@@ -7355,7 +7691,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 23
+ "y": 31
},
"id": 151,
"panels": [
@@ -7371,11 +7707,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -7384,6 +7722,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -7421,7 +7760,7 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 760
+ "y": 2691
},
"id": 153,
"options": {
@@ -7437,10 +7776,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -7469,11 +7810,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -7482,6 +7825,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -7519,7 +7863,7 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 760
+ "y": 2691
},
"id": 154,
"options": {
@@ -7535,10 +7879,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -7587,13 +7933,21 @@
"h": 12,
"w": 12,
"x": 0,
- "y": 772
+ "y": 2703
},
"id": 156,
"options": {
"displayMode": "basic",
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": false
+ },
+ "maxVizHeight": 300,
"minVizHeight": 10,
"minVizWidth": 0,
+ "namePlacement": "auto",
"orientation": "horizontal",
"reduceOptions": {
"calcs": [
@@ -7602,9 +7956,11 @@
"fields": "",
"values": false
},
- "showUnfilled": true
+ "showUnfilled": true,
+ "sizing": "auto",
+ "valueMode": "color"
},
- "pluginVersion": "9.2.5",
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -7633,11 +7989,13 @@
"mode": "palette-classic"
},
"custom": {
+ "axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
+ "barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
@@ -7646,6 +8004,7 @@
"tooltip": false,
"viz": false
},
+ "insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
@@ -7683,7 +8042,7 @@
"h": 12,
"w": 12,
"x": 12,
- "y": 772
+ "y": 2703
},
"id": 155,
"options": {
@@ -7699,10 +8058,12 @@
"sortDesc": true
},
"tooltip": {
+ "hideZeros": false,
"mode": "single",
"sort": "none"
}
},
+ "pluginVersion": "11.5.2",
"targets": [
{
"datasource": {
@@ -7729,7 +8090,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 24
+ "y": 32
},
"id": 15,
"panels": [
@@ -7797,7 +8158,7 @@
"h": 7,
"w": 8,
"x": 0,
- "y": 84
+ "y": 2716
},
"id": 13,
"options": {
@@ -7895,7 +8256,7 @@
"h": 7,
"w": 8,
"x": 8,
- "y": 84
+ "y": 2716
},
"id": 18,
"options": {
@@ -8071,7 +8432,7 @@
"h": 7,
"w": 8,
"x": 16,
- "y": 84
+ "y": 2716
},
"id": 42,
"options": {
@@ -8164,7 +8525,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 91
+ "y": 2723
},
"id": 103,
"options": {
@@ -8260,7 +8621,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 91
+ "y": 2723
},
"id": 102,
"options": {
@@ -8323,7 +8684,7 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 99
+ "y": 2731
},
"id": 101,
"options": {
@@ -8397,7 +8758,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 107
+ "y": 2739
},
"id": 105,
"options": {
@@ -8468,7 +8829,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 107
+ "y": 2739
},
"id": 104,
"options": {
@@ -8522,7 +8883,7 @@
"h": 1,
"w": 24,
"x": 0,
- "y": 25
+ "y": 33
},
"id": 107,
"panels": [
@@ -8590,7 +8951,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 116
+ "y": 2748
},
"id": 109,
"options": {
@@ -8742,6 +9103,6 @@
"timezone": "browser",
"title": "Nim-Waku V2",
"uid": "qrp_ZCTGz",
- "version": 169,
+ "version": 180,
"weekStart": ""
}
\ No newline at end of file
diff --git a/nix/README.md b/nix/README.md
new file mode 100644
index 000000000..e928b7938
--- /dev/null
+++ b/nix/README.md
@@ -0,0 +1,35 @@
+# Usage
+
+## Shell
+
+A development shell can be started using:
+```sh
+nix develop
+```
+
+## Building
+
+To build nwaku you can use:
+```sh
+nix build '.?submodules=1#default'
+```
+The `?submodules=1` part should eventually not be necessary.
+For more details see:
+https://github.com/NixOS/nix/issues/4423
+
+It can be also done without even cloning the repo:
+```sh
+nix build 'git+https://github.com/waku-org/nwaku?submodules=1#'
+```
+
+## Running
+
+```sh
+nix run 'git+https://github.com/waku-org/nwaku?submodules=1#'
+```
+
+## Testing
+
+```sh
+nix flake check ".?submodules=1#"
+```
diff --git a/nix/atlas.nix b/nix/atlas.nix
new file mode 100644
index 000000000..43336e07a
--- /dev/null
+++ b/nix/atlas.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> { } }:
+
+let
+ tools = pkgs.callPackage ./tools.nix {};
+ sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
+in pkgs.fetchFromGitHub {
+ owner = "nim-lang";
+ repo = "atlas";
+ rev = tools.findKeyValue "^ +AtlasStableCommit = \"([a-f0-9]+)\"$" sourceFile;
+ # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-G1TZdgbRPSgxXZ3VsBP2+XFCLHXVb3an65MuQx67o/k=";
+}
\ No newline at end of file
diff --git a/nix/checksums.nix b/nix/checksums.nix
new file mode 100644
index 000000000..d79345d24
--- /dev/null
+++ b/nix/checksums.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> { } }:
+
+let
+ tools = pkgs.callPackage ./tools.nix {};
+ sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
+in pkgs.fetchFromGitHub {
+ owner = "nim-lang";
+ repo = "checksums";
+ rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile;
+ # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ=";
+}
diff --git a/nix/csources.nix b/nix/csources.nix
new file mode 100644
index 000000000..5aa90fd6f
--- /dev/null
+++ b/nix/csources.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> { } }:
+
+let
+ tools = pkgs.callPackage ./tools.nix {};
+ sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt;
+in pkgs.fetchFromGitHub {
+ owner = "nim-lang";
+ repo = "csources_v2";
+ rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile;
+ # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs=";
+}
\ No newline at end of file
diff --git a/nix/default.nix b/nix/default.nix
new file mode 100644
index 000000000..29eec844d
--- /dev/null
+++ b/nix/default.nix
@@ -0,0 +1,116 @@
+{
+ config ? {},
+  pkgs ? import <nixpkgs> { },
+ src ? ../.,
+ targets ? ["libwaku-android-arm64"],
+ verbosity ? 2,
+ useSystemNim ? true,
+ quickAndDirty ? true,
+ stableSystems ? [
+ "x86_64-linux" "aarch64-linux"
+ ],
+ androidArch,
+ abidir,
+ zerokitPkg,
+}:
+
+assert pkgs.lib.assertMsg ((src.submodules or true) == true)
+ "Unable to build without submodules. Append '?submodules=1#' to the URI.";
+
+let
+ inherit (pkgs) stdenv lib writeScriptBin callPackage;
+
+ revision = lib.substring 0 8 (src.rev or "dirty");
+
+in stdenv.mkDerivation rec {
+
+ pname = "nwaku";
+
+ version = "1.0.0-${revision}";
+
+ inherit src;
+
+ buildInputs = with pkgs; [
+ openssl
+ gmp
+ zip
+ ];
+
+ # Dependencies that should only exist in the build environment.
+ nativeBuildInputs = let
+ # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'.
+ fakeGit = writeScriptBin "git" "echo ${version}";
+ # Fix for the zerokit package that is built with cargo/rustup/cross.
+ fakeCargo = writeScriptBin "cargo" "echo ${version}";
+ # Fix for the zerokit package that is built with cargo/rustup/cross.
+ fakeRustup = writeScriptBin "rustup" "echo ${version}";
+ # Fix for the zerokit package that is built with cargo/rustup/cross.
+ fakeCross = writeScriptBin "cross" "echo ${version}";
+ in
+ with pkgs; [
+ cmake
+ which
+ lsb-release
+ zerokitPkg
+ nim-unwrapped-2_0
+ fakeGit
+ fakeCargo
+ fakeRustup
+ fakeCross
+ ];
+
+ # Environment variables required for Android builds
+ ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}";
+ ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}";
+ NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
+ XDG_CACHE_HOME = "/tmp";
+ androidManifest = "";
+
+ makeFlags = targets ++ [
+ "V=${toString verbosity}"
+ "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}"
+ "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}"
+ "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}"
+ ];
+
+ configurePhase = ''
+ patchShebangs . vendor/nimbus-build-system > /dev/null
+ make nimbus-build-system-paths
+ make nimbus-build-system-nimble-dir
+ '';
+
+ preBuild = ''
+ ln -s waku.nimble waku.nims
+ pushd vendor/nimbus-build-system/vendor/Nim
+ mkdir dist
+ cp -r ${callPackage ./nimble.nix {}} dist/nimble
+ chmod 777 -R dist/nimble
+ mkdir -p dist/nimble/dist
+ cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both
+ cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums
+ cp -r ${callPackage ./atlas.nix {}} dist/atlas
+ chmod 777 -R dist/atlas
+ mkdir dist/atlas/dist
+ cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat
+ cp -r ${callPackage ./sat.nix {}} dist/atlas/dist/sat
+ cp -r ${callPackage ./csources.nix {}} csources_v2
+ chmod 777 -R dist/nimble csources_v2
+ popd
+ mkdir -p vendor/zerokit/target/${androidArch}/release
+ cp ${zerokitPkg}/librln.so vendor/zerokit/target/${androidArch}/release/
+ '';
+
+ installPhase = ''
+ mkdir -p $out/jni
+ cp -r ./build/android/${abidir}/* $out/jni/
+ echo '${androidManifest}' > $out/jni/AndroidManifest.xml
+ cd $out && zip -r libwaku.aar *
+ '';
+
+ meta = with pkgs.lib; {
+ description = "NWaku derivation to build libwaku for mobile targets using Android NDK and Rust.";
+ homepage = "https://github.com/status-im/nwaku";
+ license = licenses.mit;
+ platforms = stableSystems;
+ };
+}
diff --git a/nix/nimble.nix b/nix/nimble.nix
new file mode 100644
index 000000000..5bd7b0f32
--- /dev/null
+++ b/nix/nimble.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> { } }:
+
+let
+ tools = pkgs.callPackage ./tools.nix {};
+ sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
+in pkgs.fetchFromGitHub {
+ owner = "nim-lang";
+ repo = "nimble";
+ rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile;
+ # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-MVHf19UbOWk8Zba2scj06PxdYYOJA6OXrVyDQ9Ku6Us=";
+}
\ No newline at end of file
diff --git a/nix/pkgs/android-sdk/compose.nix b/nix/pkgs/android-sdk/compose.nix
new file mode 100644
index 000000000..c73aaee43
--- /dev/null
+++ b/nix/pkgs/android-sdk/compose.nix
@@ -0,0 +1,26 @@
+#
+# This Nix expression centralizes the configuration
+# for the Android development environment.
+#
+
+{ androidenv, lib, stdenv }:
+
+assert lib.assertMsg (stdenv.system != "aarch64-darwin")
+ "aarch64-darwin not supported for Android SDK. Use: NIXPKGS_SYSTEM_OVERRIDE=x86_64-darwin";
+
+# The "android-sdk-license" license is accepted
+# by setting android_sdk.accept_license = true.
+androidenv.composeAndroidPackages {
+ cmdLineToolsVersion = "9.0";
+ toolsVersion = "26.1.1";
+ platformToolsVersion = "33.0.3";
+ buildToolsVersions = [ "34.0.0" ];
+ platformVersions = [ "34" ];
+ cmakeVersions = [ "3.22.1" ];
+ ndkVersion = "25.2.9519653";
+ includeNDK = true;
+ includeExtras = [
+ "extras;android;m2repository"
+ "extras;google;m2repository"
+ ];
+}
diff --git a/nix/pkgs/android-sdk/default.nix b/nix/pkgs/android-sdk/default.nix
new file mode 100644
index 000000000..f3f795251
--- /dev/null
+++ b/nix/pkgs/android-sdk/default.nix
@@ -0,0 +1,14 @@
+#
+# This Nix expression centralizes the configuration
+# for the Android development environment.
+#
+
+{ callPackage }:
+
+let
+ compose = callPackage ./compose.nix { };
+ pkgs = callPackage ./pkgs.nix { inherit compose; };
+ shell = callPackage ./shell.nix { androidPkgs = pkgs; };
+in {
+ inherit compose pkgs shell;
+}
diff --git a/nix/pkgs/android-sdk/pkgs.nix b/nix/pkgs/android-sdk/pkgs.nix
new file mode 100644
index 000000000..645987b3a
--- /dev/null
+++ b/nix/pkgs/android-sdk/pkgs.nix
@@ -0,0 +1,17 @@
+{ stdenv, compose }:
+
+#
+# This derivation simply symlinks some stuff to get
+# shorter paths as libexec/android-sdk is quite the mouthful.
+# With this you can just do `androidPkgs.sdk` and `androidPkgs.ndk`.
+#
+stdenv.mkDerivation {
+ name = "${compose.androidsdk.name}-mod";
+ phases = [ "symlinkPhase" ];
+ outputs = [ "out" "sdk" "ndk" ];
+ symlinkPhase = ''
+ ln -s ${compose.androidsdk} $out
+ ln -s ${compose.androidsdk}/libexec/android-sdk $sdk
+ ln -s ${compose.androidsdk}/libexec/android-sdk/ndk-bundle $ndk
+ '';
+}
diff --git a/nix/pkgs/android-sdk/shell.nix b/nix/pkgs/android-sdk/shell.nix
new file mode 100644
index 000000000..b5397763f
--- /dev/null
+++ b/nix/pkgs/android-sdk/shell.nix
@@ -0,0 +1,19 @@
+{ mkShell, openjdk, androidPkgs }:
+
+mkShell {
+ name = "android-sdk-shell";
+ buildInputs = [ openjdk ];
+
+ shellHook = ''
+ export ANDROID_HOME="${androidPkgs.sdk}"
+ export ANDROID_NDK_ROOT="${androidPkgs.ndk}"
+ export ANDROID_SDK_ROOT="$ANDROID_HOME"
+ export ANDROID_NDK_HOME="${androidPkgs.ndk}"
+
+ export PATH="$ANDROID_NDK_ROOT:$PATH"
+ export PATH="$ANDROID_SDK_ROOT/tools:$PATH"
+ export PATH="$ANDROID_SDK_ROOT/tools/bin:$PATH"
+ export PATH="$(echo $ANDROID_SDK_ROOT/cmdline-tools/*/bin):$PATH"
+ export PATH="$ANDROID_SDK_ROOT/platform-tools:$PATH"
+ '';
+}
diff --git a/nix/sat.nix b/nix/sat.nix
new file mode 100644
index 000000000..31f264468
--- /dev/null
+++ b/nix/sat.nix
@@ -0,0 +1,12 @@
+{ pkgs ? import <nixpkgs> { } }:
+
+let
+ tools = pkgs.callPackage ./tools.nix {};
+ sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
+in pkgs.fetchFromGitHub {
+ owner = "nim-lang";
+ repo = "sat";
+ rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile;
+ # WARNING: Requires manual updates when Nim compiler version changes.
+ hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c=";
+}
\ No newline at end of file
diff --git a/nix/shell.nix b/nix/shell.nix
new file mode 100644
index 000000000..26086a26e
--- /dev/null
+++ b/nix/shell.nix
@@ -0,0 +1,26 @@
+{
+  pkgs ? import <nixpkgs> { },
+}:
+let
+ optionalDarwinDeps = pkgs.lib.optionals pkgs.stdenv.isDarwin [
+ pkgs.libiconv
+ pkgs.darwin.apple_sdk.frameworks.Security
+ ];
+in
+pkgs.mkShell {
+ inputsFrom = [
+ pkgs.androidShell
+ ] ++ optionalDarwinDeps;
+
+ buildInputs = with pkgs; [
+ git
+ cargo
+ rustup
+ cmake
+ nim-unwrapped-2_0
+ ];
+
+ LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
+ pkgs.pcre
+ ];
+}
diff --git a/nix/tools.nix b/nix/tools.nix
new file mode 100644
index 000000000..108d38606
--- /dev/null
+++ b/nix/tools.nix
@@ -0,0 +1,15 @@
+{ pkgs ? import { } }:
+
+let
+
+ inherit (pkgs.lib) fileContents last splitString flatten remove;
+ inherit (builtins) map match;
+in {
+ findKeyValue = regex: sourceFile:
+ let
+ linesFrom = file: splitString "\n" (fileContents file);
+ matching = regex: lines: map (line: match regex line) lines;
+ extractMatch = matches: last (flatten (remove null matches));
+ in
+ extractMatch (matching regex (linesFrom sourceFile));
+}
diff --git a/scripts/generate_nimble_links.sh b/scripts/generate_nimble_links.sh
new file mode 100755
index 000000000..e01e6db46
--- /dev/null
+++ b/scripts/generate_nimble_links.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+# This script is used for building Nix derivation which doesn't allow Git commands.
+# It implements similar logic as $(NIMBLE_DIR) target in nimbus-build-system Makefile.
+
+create_nimble_link_script_path="$(pwd)/${BUILD_SYSTEM_DIR}/scripts/create_nimble_link.sh"
+
+process_gitmodules() {
+ local gitmodules_file="$1"
+ local gitmodules_dir=$(dirname "$gitmodules_file")
+
+ # Extract all submodule paths from the .gitmodules file
+ grep "path" $gitmodules_file | awk '{print $3}' | while read submodule_path; do
+ # Change pwd to the submodule dir and execute script
+ pushd "$gitmodules_dir/$submodule_path" > /dev/null
+ NIMBLE_DIR=$NIMBLE_DIR PWD_CMD=$PWD_CMD EXCLUDED_NIM_PACKAGES=$EXCLUDED_NIM_PACKAGES \
+ "$create_nimble_link_script_path" "$submodule_path"
+ popd > /dev/null
+ done
+}
+
+# Create the base directory if it doesn't exist
+mkdir -p "${NIMBLE_DIR}/pkgs"
+
+# Find all .gitmodules files and process them
+for gitmodules_file in $(find . -name '.gitmodules'); do
+ echo "Processing .gitmodules file: $gitmodules_file"
+ process_gitmodules "$gitmodules_file"
+done
diff --git a/shell.nix b/shell.nix
deleted file mode 100644
index ae2426a78..000000000
--- a/shell.nix
+++ /dev/null
@@ -1,22 +0,0 @@
-{ pkgs ? import (builtins.fetchTarball {
- url = "https://github.com/NixOS/nixpkgs/archive/dbf1d73cd1a17276196afeee169b4cf7834b7a96.tar.gz";
- sha256 = "sha256:1k5nvn2yzw370cqsfh62lncsgydq2qkbjrx34cprzf0k6b93v7ch";
-}) {} }:
-
-pkgs.mkShell {
- name = "nim-waku-build-shell";
-
- # Versions dependent on nixpkgs commit. Update manually.
- buildInputs = with pkgs; [
- git # 2.37.3
- which # 2.21
- rustc # 1.63.0
- ] ++ lib.optionals stdenv.isDarwin [
- libiconv
- darwin.apple_sdk.frameworks.Security
- ];
-
- LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
- pkgs.pcre
- ];
-}
diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim
index 3e847ae86..07e0cd895 100644
--- a/tests/all_tests_waku.nim
+++ b/tests/all_tests_waku.nim
@@ -85,7 +85,8 @@ import
./test_waku_noise_sessions,
./test_waku_netconfig,
./test_waku_switch,
- ./test_waku_rendezvous
+ ./test_waku_rendezvous,
+ ./waku_discv5/test_waku_discv5
# Waku Keystore test suite
import ./test_waku_keystore_keyfile, ./test_waku_keystore
@@ -98,6 +99,7 @@ import
./wakunode_rest/test_rest_relay_serdes,
./wakunode_rest/test_rest_serdes,
./wakunode_rest/test_rest_filter,
+ ./wakunode_rest/test_rest_lightpush,
./wakunode_rest/test_rest_lightpush_legacy,
./wakunode_rest/test_rest_admin,
./wakunode_rest/test_rest_cors,
@@ -106,4 +108,4 @@ import
import ./waku_rln_relay/test_all
# Node Factory
-import ./factory/test_config
+import ./factory/test_external_config
diff --git a/tests/common/test_base64_codec.nim b/tests/common/test_base64_codec.nim
index fd3b23c76..1c2d04c45 100644
--- a/tests/common/test_base64_codec.nim
+++ b/tests/common/test_base64_codec.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/strutils, stew/[results, byteutils], testutils/unittests
+import std/strutils, results, stew/byteutils, testutils/unittests
import waku/common/base64
suite "Waku Common - stew base64 wrapper":
diff --git a/tests/common/test_confutils_envvar.nim b/tests/common/test_confutils_envvar.nim
index 6916f52a8..676a35ae1 100644
--- a/tests/common/test_confutils_envvar.nim
+++ b/tests/common/test_confutils_envvar.nim
@@ -2,7 +2,7 @@
import
std/[os, options],
- stew/results,
+ results,
stew/shims/net as stewNet,
testutils/unittests,
confutils,
diff --git a/tests/common/test_enr_builder.nim b/tests/common/test_enr_builder.nim
index b95828bb4..9fe8f6807 100644
--- a/tests/common/test_enr_builder.nim
+++ b/tests/common/test_enr_builder.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, stew/results, stew/shims/net, testutils/unittests
+import std/options, results, stew/shims/net, testutils/unittests
import waku/common/enr, ../testlib/wakucore
suite "nim-eth ENR - builder and typed record":
diff --git a/tests/common/test_parse_size.nim b/tests/common/test_parse_size.nim
index dd1f2c7af..009cb9637 100644
--- a/tests/common/test_parse_size.nim
+++ b/tests/common/test_parse_size.nim
@@ -1,6 +1,6 @@
{.used.}
-import testutils/unittests, stew/results
+import testutils/unittests, results
import waku/common/utils/parse_size_units
suite "Size serialization test":
diff --git a/tests/common/test_ratelimit_setting.nim b/tests/common/test_ratelimit_setting.nim
index 6f6ac8d38..97d69e06a 100644
--- a/tests/common/test_ratelimit_setting.nim
+++ b/tests/common/test_ratelimit_setting.nim
@@ -10,7 +10,7 @@
import testutils/unittests
import chronos, libp2p/stream/connection
-import std/[sequtils, options, tables]
+import std/[options, tables]
import ../../waku/common/rate_limit/request_limiter
import ../../waku/common/rate_limit/timed_map
diff --git a/tests/common/test_requestratelimiter.nim b/tests/common/test_requestratelimiter.nim
index 0b494c1be..be910b38e 100644
--- a/tests/common/test_requestratelimiter.nim
+++ b/tests/common/test_requestratelimiter.nim
@@ -10,7 +10,7 @@
import testutils/unittests
import chronos, libp2p/stream/connection
-import std/[sequtils, options]
+import std/options
import ../../waku/common/rate_limit/request_limiter
import ../../waku/common/rate_limit/timed_map
diff --git a/tests/common/test_sqlite_migrations.nim b/tests/common/test_sqlite_migrations.nim
index 58708ce21..9e67fb9c8 100644
--- a/tests/common/test_sqlite_migrations.nim
+++ b/tests/common/test_sqlite_migrations.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/[strutils, os], stew/results, testutils/unittests
+import std/[strutils, os], results, testutils/unittests
import waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils
template sourceDir(): string =
diff --git a/tests/factory/test_config.nim b/tests/factory/test_config.nim
deleted file mode 100644
index 1d8bf6e37..000000000
--- a/tests/factory/test_config.nim
+++ /dev/null
@@ -1,157 +0,0 @@
-{.used.}
-
-import
- std/options,
- testutils/unittests,
- chronos,
- libp2p/crypto/[crypto, secp],
- libp2p/multiaddress,
- nimcrypto/utils,
- secp256k1,
- confutils
-import
- ../../waku/factory/external_config,
- ../../waku/factory/internal_config,
- ../../waku/factory/networks_config,
- ../../waku/common/logging
-
-suite "Waku config - apply preset":
- test "Default preset is TWN":
- ## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
-
- ## Given
- let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn")
-
- ## When
- let res = applyPresetConfiguration(preConfig)
- assert res.isOk(), $res.error
-
- ## Then
- let conf = res.get()
- assert conf.maxMessageSize == expectedConf.maxMessageSize
- assert conf.clusterId == expectedConf.clusterId
- assert conf.rlnRelay == expectedConf.rlnRelay
- assert conf.rlnRelayEthContractAddress == expectedConf.rlnRelayEthContractAddress
- assert conf.rlnRelayDynamic == expectedConf.rlnRelayDynamic
- assert conf.rlnRelayChainId == expectedConf.rlnRelayChainId
- assert conf.rlnRelayBandwidthThreshold == expectedConf.rlnRelayBandwidthThreshold
- assert conf.rlnEpochSizeSec == expectedConf.rlnEpochSizeSec
- assert conf.rlnRelayUserMessageLimit == expectedConf.rlnRelayUserMessageLimit
- assert conf.numShardsInNetwork == expectedConf.numShardsInNetwork
- assert conf.discv5BootstrapNodes == expectedConf.discv5BootstrapNodes
-
- test "Subscribes to all valid shards in twn":
- ## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
-
- ## Given
- let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
- let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
-
- ## When
- let res = applyPresetConfiguration(preConfig)
- assert res.isOk(), $res.error
-
- ## Then
- let conf = res.get()
- assert conf.shards.len == expectedConf.numShardsInNetwork.int
-
- test "Subscribes to some valid shards in twn":
- ## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
-
- ## Given
- let shards: seq[uint16] = @[0, 4, 7]
- let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
-
- ## When
- let resConf = applyPresetConfiguration(preConfig)
- let res = validateShards(resConf.get())
- assert res.isOk(), $res.error
-
- ## Then
- let conf = resConf.get()
- assert conf.shards.len() == shards.len()
- for index, shard in shards:
- assert shard in conf.shards
-
- test "Subscribes to invalid shards in twn":
- ## Setup
-
- ## Given
- let shards: seq[uint16] = @[0, 4, 7, 10]
- let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
- let postConfig = applyPresetConfiguration(preConfig)
-
- ## When
- let res = validateShards(postConfig.get())
-
- ## Then
- assert res.isErr(), "Invalid shard was accepted"
-
-suite "Waku config - node key":
- test "Passed node key is used":
- ## Setup
- let nodeKeyStr =
- "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
- let nodekey = block:
- let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
- crypto.PrivateKey(scheme: Secp256k1, skkey: key)
-
- ## Given
- let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
-
- ## When
- let res = getNodeKey(config)
- assert res.isOk(), $res.error
-
- ## Then
- let resKey = res.get()
- assert utils.toHex(resKey.getRawBytes().get()) ==
- utils.toHex(nodekey.getRawBytes().get())
-
-suite "Waku config - Shards":
- test "Shards are valid":
- ## Setup
-
- ## Given
- let shards: seq[uint16] = @[0, 2, 4]
- let numShardsInNetwork = 5.uint32
- let config = WakuNodeConf(
- cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
- )
-
- ## When
- let res = validateShards(config)
-
- ## Then
- assert res.isOk(), $res.error
-
- test "Shards are not in range":
- ## Setup
-
- ## Given
- let shards: seq[uint16] = @[0, 2, 5]
- let numShardsInNetwork = 5.uint32
- let config = WakuNodeConf(
- cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
- )
-
- ## When
- let res = validateShards(config)
-
- ## Then
- assert res.isErr(), "Invalid shard was accepted"
-
- test "Shard is passed without num shards":
- ## Setup
-
- ## Given
- let config = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
-
- ## When
- let res = validateShards(config)
-
- ## Then
- assert res.isOk(), $res.error
diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim
new file mode 100644
index 000000000..5bd4e2c86
--- /dev/null
+++ b/tests/factory/test_external_config.nim
@@ -0,0 +1,208 @@
+{.used.}
+
+import
+ std/options,
+ testutils/unittests,
+ chronos,
+ libp2p/crypto/[crypto, secp],
+ libp2p/multiaddress,
+ nimcrypto/utils,
+ secp256k1,
+ confutils
+import
+ ../../waku/factory/external_config,
+ ../../waku/factory/networks_config,
+ ../../waku/factory/waku_conf,
+ ../../waku/common/logging,
+ ../../waku/common/utils/parse_size_units
+
+suite "Waku config - apply preset":
+ test "Default preset is TWN":
+ ## Setup
+ let expectedConf = ClusterConf.TheWakuNetworkConf()
+
+ ## Given
+ let preConfig = WakuNodeConf(
+ cmd: noCommand,
+ preset: "twn",
+ relay: true,
+ ethClientUrls: @["http://someaddress".EthRpcUrl],
+ rlnRelayTreePath: "/tmp/sometreepath",
+ )
+
+ ## When
+ let res = preConfig.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let conf = res.get()
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
+ check conf.clusterId == expectedConf.clusterId
+ check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
+ if conf.rlnRelayConf.isSome():
+ let rlnRelayConf = conf.rlnRelayConf.get()
+ check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
+ check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
+ check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
+ check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
+ if conf.discv5Conf.isSome():
+ let discv5Conf = conf.discv5Conf.get()
+ check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
+
+ test "Subscribes to all valid shards in twn":
+ ## Setup
+ let expectedConf = ClusterConf.TheWakuNetworkConf()
+
+ ## Given
+ let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
+ let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
+
+ ## When
+ let res = preConfig.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let conf = res.get()
+ check conf.shards.len == expectedConf.numShardsInNetwork.int
+
+ test "Subscribes to some valid shards in twn":
+ ## Setup
+ let expectedConf = ClusterConf.TheWakuNetworkConf()
+
+ ## Given
+ let shards: seq[uint16] = @[0, 4, 7]
+ let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
+
+ ## When
+ let resConf = preConfig.toWakuConf()
+ assert resConf.isOk(), $resConf.error
+
+ ## Then
+ let conf = resConf.get()
+ assert conf.shards.len() == shards.len()
+ for index, shard in shards:
+ assert shard in conf.shards
+
+ test "Subscribes to invalid shards in twn":
+ ## Setup
+
+ ## Given
+ let shards: seq[uint16] = @[0, 4, 7, 10]
+ let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
+
+ ## When
+ let res = preConfig.toWakuConf()
+
+ ## Then
+ assert res.isErr(), "Invalid shard was accepted"
+
+ test "Apply TWN preset when cluster id = 1":
+ ## Setup
+ let expectedConf = ClusterConf.TheWakuNetworkConf()
+
+ ## Given
+ let preConfig = WakuNodeConf(
+ cmd: noCommand,
+ clusterId: 1.uint16,
+ relay: true,
+ ethClientUrls: @["http://someaddress".EthRpcUrl],
+ rlnRelayTreePath: "/tmp/sometreepath",
+ )
+
+ ## When
+ let res = preConfig.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let conf = res.get()
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
+ check conf.clusterId == expectedConf.clusterId
+ check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
+ if conf.rlnRelayConf.isSome():
+ let rlnRelayConf = conf.rlnRelayConf.get()
+ check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
+ check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
+ check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
+ check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
+ if conf.discv5Conf.isSome():
+ let discv5Conf = conf.discv5Conf.get()
+ check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
+
+suite "Waku config - node key":
+ test "Passed node key is used":
+ ## Setup
+ let nodeKeyStr =
+ "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
+ let nodekey = block:
+ let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
+ crypto.PrivateKey(scheme: Secp256k1, skkey: key)
+
+ ## Given
+ let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
+
+ ## When
+ let res = config.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let resKey = res.get().nodeKey
+ assert utils.toHex(resKey.getRawBytes().get()) ==
+ utils.toHex(nodekey.getRawBytes().get())
+
+suite "Waku config - Shards":
+ test "Shards are valid":
+ ## Setup
+
+ ## Given
+ let shards: seq[uint16] = @[0, 2, 4]
+ let numShardsInNetwork = 5.uint32
+ let wakuNodeConf = WakuNodeConf(
+ cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
+ )
+
+ ## When
+ let res = wakuNodeConf.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let wakuConf = res.get()
+ let vRes = wakuConf.validate()
+ assert vRes.isOk(), $vRes.error
+
+ test "Shards are not in range":
+ ## Setup
+
+ ## Given
+ let shards: seq[uint16] = @[0, 2, 5]
+ let numShardsInNetwork = 5.uint32
+ let wakuNodeConf = WakuNodeConf(
+ cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
+ )
+
+ ## When
+ let res = wakuNodeConf.toWakuConf()
+
+ ## Then
+ assert res.isErr(), "Invalid shard was accepted"
+
+ test "Shard is passed without num shards":
+ ## Setup
+
+ ## Given
+ let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
+
+ ## When
+ let res = wakuNodeConf.toWakuConf()
+
+ ## Then
+ let wakuConf = res.get()
+ let vRes = wakuConf.validate()
+ assert vRes.isOk(), $vRes.error
diff --git a/tests/factory/test_node_factory.nim b/tests/factory/test_node_factory.nim
index c575c2b81..f3d11b1a2 100644
--- a/tests/factory/test_node_factory.nim
+++ b/tests/factory/test_node_factory.nim
@@ -2,11 +2,15 @@
import testutils/unittests, chronos, libp2p/protocols/connectivity/relay/relay
-import ../testlib/wakunode, waku/factory/node_factory, waku/waku_node
+import
+ ../testlib/wakunode,
+ waku/factory/node_factory,
+ waku/waku_node,
+ waku/factory/conf_builder/conf_builder
suite "Node Factory":
test "Set up a node based on default configurations":
- let conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@@ -20,8 +24,10 @@ suite "Node Factory":
not node.wakuRendezvous.isNil()
test "Set up a node with Store enabled":
- var conf = defaultTestWakuNodeConf()
- conf.store = true
+ var confBuilder = defaultTestWakuConfBuilder()
+ confBuilder.storeServiceConf.withEnabled(true)
+ confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3")
+ let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@@ -32,8 +38,9 @@ suite "Node Factory":
not node.wakuArchive.isNil()
test "Set up a node with Filter enabled":
- var conf = defaultTestWakuNodeConf()
- conf.filter = true
+ var confBuilder = defaultTestWakuConfBuilder()
+ confBuilder.filterServiceConf.withEnabled(true)
+ let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@@ -43,7 +50,7 @@ test "Set up a node with Filter enabled":
not node.wakuFilter.isNil()
test "Start a node based on default configurations":
- let conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim
new file mode 100644
index 000000000..6b7040dd5
--- /dev/null
+++ b/tests/factory/test_waku_conf.nim
@@ -0,0 +1,272 @@
+{.used.}
+
+import
+ libp2p/crypto/[crypto, secp],
+ libp2p/multiaddress,
+ nimcrypto/utils,
+ std/[options, sequtils],
+ results,
+ testutils/unittests
+import
+ waku/factory/waku_conf,
+ waku/factory/waku_conf_builder,
+ waku/factory/networks_config,
+ waku/common/utils/parse_size_units
+
+suite "Waku Conf - build with cluster conf":
+ test "Cluster Conf is passed and relay is enabled":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+ builder.discv5Conf.withUdpPort(9000)
+ builder.withRelayServiceRatio("50:50")
+ # Mount all shards in network
+ let expectedShards = toSeq[0.uint16 .. 7.uint16]
+
+ ## Given
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.withClusterConf(clusterConf)
+ builder.withRelay(true)
+ builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check conf.clusterId == clusterConf.clusterId
+ check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
+ check conf.shards == expectedShards
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+
+ if clusterConf.rlnRelay:
+ assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"
+
+ let rlnRelayConf = conf.rlnRelayConf.get()
+ check rlnRelayConf.ethContractAddress.string ==
+ clusterConf.rlnRelayEthContractAddress
+ check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
+
+ test "Cluster Conf is passed, but relay is disabled":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+ builder.withRelayServiceRatio("50:50")
+ builder.discv5Conf.withUdpPort(9000)
+ # Mount all shards in network
+ let expectedShards = toSeq[0.uint16 .. 7.uint16]
+
+ ## Given
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.withClusterConf(clusterConf)
+ builder.withRelay(false)
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check conf.clusterId == clusterConf.clusterId
+ check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
+ check conf.shards == expectedShards
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+
+ assert conf.rlnRelayConf.isNone
+
+ test "Cluster Conf is passed, but rln relay is disabled":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+
+ let # Mount all shards in network
+ expectedShards = toSeq[0.uint16 .. 7.uint16]
+
+ ## Given
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.withClusterConf(clusterConf)
+ builder.rlnRelayConf.withEnabled(false)
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check conf.clusterId == clusterConf.clusterId
+ check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
+ check conf.shards == expectedShards
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ assert conf.rlnRelayConf.isNone
+
+ test "Cluster Conf is passed and valid shards are specified":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+ let shards = @[2.uint16, 3.uint16]
+
+ ## Given
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.withClusterConf(clusterConf)
+ builder.withShards(shards)
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check conf.clusterId == clusterConf.clusterId
+ check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
+ check conf.shards == shards
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+
+ test "Cluster Conf is passed and invalid shards are specified":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+ let shards = @[2.uint16, 10.uint16]
+
+ ## Given
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.withClusterConf(clusterConf)
+ builder.withShards(shards)
+
+ ## When
+ let resConf = builder.build()
+
+ ## Then
+ assert resConf.isErr(), "Invalid shard was accepted"
+
+ test "Cluster Conf is passed and RLN contract is overridden":
+ ## Setup
+ let clusterConf = ClusterConf.TheWakuNetworkConf()
+ var builder = WakuConfBuilder.init()
+ builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+
+ # Mount all shards in network
+ let expectedShards = toSeq[0.uint16 .. 7.uint16]
+ let contractAddress = "0x0123456789ABCDEF"
+
+ ## Given
+ builder.rlnRelayConf.withEthContractAddress(contractAddress)
+ builder.withClusterConf(clusterConf)
+ builder.withRelay(true)
+ builder.rlnRelayConf.withTreePath("/tmp/test")
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check conf.clusterId == clusterConf.clusterId
+ check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
+ check conf.shards == expectedShards
+ check conf.maxMessageSizeBytes ==
+ uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ check conf.discv5Conf.isSome == clusterConf.discv5Discovery
+ check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+
+ if clusterConf.rlnRelay:
+ assert conf.rlnRelayConf.isSome
+
+ let rlnRelayConf = conf.rlnRelayConf.get()
+ check rlnRelayConf.ethContractAddress.string == contractAddress
+ check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
+
+suite "Waku Conf - node key":
+ test "Node key is generated":
+ ## Setup
+ var builder = WakuConfBuilder.init()
+ builder.withClusterId(1)
+
+ ## Given
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ let pubkey = getPublicKey(conf.nodeKey)
+ assert pubkey.isOk()
+
+ test "Passed node key is used":
+ ## Setup
+ let nodeKeyStr =
+ "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
+ let nodeKey = block:
+ let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
+ crypto.PrivateKey(scheme: Secp256k1, skkey: key)
+ var builder = WakuConfBuilder.init()
+ builder.withClusterId(1)
+
+ ## Given
+ builder.withNodeKey(nodeKey)
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ assert utils.toHex(conf.nodeKey.getRawBytes().get()) ==
+ utils.toHex(nodeKey.getRawBytes().get()),
+ "Passed node key isn't in config:" & $nodeKey & $conf.nodeKey
+
+suite "Waku Conf - extMultiaddrs":
+ test "Valid multiaddresses are passed and accepted":
+ ## Setup
+ var builder = WakuConfBuilder.init()
+ builder.withClusterId(1)
+
+ ## Given
+ let multiaddrs =
+ @["/ip4/127.0.0.1/udp/9090/quic", "/ip6/::1/tcp/3217", "/dns4/foo.com/tcp/80"]
+ builder.withExtMultiAddrs(multiaddrs)
+
+ ## When
+ let resConf = builder.build()
+ assert resConf.isOk(), $resConf.error
+ let conf = resConf.get()
+
+ ## Then
+ let resValidate = conf.validate()
+ assert resValidate.isOk(), $resValidate.error
+ check multiaddrs.len == conf.networkConf.extMultiAddrs.len
+ let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
+ proc(m: MultiAddress): string =
+ $m
+ )
+ for m in multiaddrs:
+ check m in resMultiaddrs
diff --git a/tests/incentivization/test_poc_eligibility.nim b/tests/incentivization/test_poc_eligibility.nim
index 7490c2304..be9018898 100644
--- a/tests/incentivization/test_poc_eligibility.nim
+++ b/tests/incentivization/test_poc_eligibility.nim
@@ -1,17 +1,9 @@
{.used.}
-import
- std/options,
- testutils/unittests,
- chronos,
- web3,
- stew/byteutils,
- stint,
- strutils,
- tests/testlib/testasync
+import std/options, testutils/unittests, chronos, web3, stint, tests/testlib/testasync
import
- waku/[node/peer_manager, waku_core],
+ waku/node/peer_manager,
waku/incentivization/[rpc, eligibility_manager],
../waku_rln_relay/[utils_onchain, utils]
diff --git a/tests/incentivization/test_poc_reputation.nim b/tests/incentivization/test_poc_reputation.nim
index 6e21330a2..a74ebd102 100644
--- a/tests/incentivization/test_poc_reputation.nim
+++ b/tests/incentivization/test_poc_reputation.nim
@@ -13,6 +13,9 @@ import
waku/[node/peer_manager, waku_core],
waku/incentivization/[rpc, reputation_manager],
waku/waku_lightpush/[rpc, common]
+import std/options, testutils/unittests, chronos, web3
+
+import waku/incentivization/reputation_manager, waku/waku_lightpush_legacy/rpc
suite "Waku Incentivization PoC Reputation":
var manager {.threadvar.}: ReputationManager
diff --git a/tests/node/peer_manager/peer_store/utils.nim b/tests/node/peer_manager/peer_store/utils.nim
index 1d5dc6e22..891c5fdab 100644
--- a/tests/node/peer_manager/peer_store/utils.nim
+++ b/tests/node/peer_manager/peer_store/utils.nim
@@ -1,4 +1,4 @@
-import std/options, stew/results, libp2p/peerstore
+import std/options, results
import
waku/node/peer_manager/[waku_peer_store, peer_store/waku_peer_storage],
@@ -7,6 +7,3 @@ import
proc newTestWakuPeerStorage*(path: Option[string] = string.none()): WakuPeerStorage =
let db = newSqliteDatabase(path)
WakuPeerStorage.new(db).value()
-
-proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool =
- return peerStore[AddressBook].contains(peerId)
diff --git a/tests/node/peer_manager/test_peer_manager.nim b/tests/node/peer_manager/test_peer_manager.nim
index 57acf13df..6eddda0d6 100644
--- a/tests/node/peer_manager/test_peer_manager.nim
+++ b/tests/node/peer_manager/test_peer_manager.nim
@@ -76,8 +76,10 @@ suite "Peer Manager":
# And both mount metadata and relay
discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic
- await client.mountRelay()
- await server.mountRelay()
+ (await client.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# And both nodes are started
await allFutures(server.start(), client.start())
@@ -89,7 +91,8 @@ suite "Peer Manager":
await sleepAsync(FUTURE_TIMEOUT)
# When making an operation that triggers onPeerMetadata
- client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic"))
+ client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr:
+ assert false, "Failed to subscribe to relay"
await sleepAsync(FUTURE_TIMEOUT)
check:
@@ -109,8 +112,10 @@ suite "Peer Manager":
# And both mount metadata and relay
discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic
- await client.mountRelay()
- await server.mountRelay()
+ (await client.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# And both nodes are started
await allFutures(server.start(), client.start())
@@ -122,7 +127,8 @@ suite "Peer Manager":
await sleepAsync(FUTURE_TIMEOUT)
# When making an operation that triggers onPeerMetadata
- client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic"))
+ client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr:
+ assert false, "Failed to subscribe to relay"
await sleepAsync(FUTURE_TIMEOUT)
check:
diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim
index c9ea12f17..bf9f2495b 100644
--- a/tests/node/test_wakunode_filter.nim
+++ b/tests/node/test_wakunode_filter.nim
@@ -6,7 +6,6 @@ import
testutils/unittests,
chronos,
chronicles,
- os,
libp2p/[peerstore, crypto/crypto]
import
@@ -136,7 +135,8 @@ suite "Waku Filter - End to End":
asyncTest "Client Node can't receive Push from Server Node, via Relay":
# Given the server node has Relay enabled
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "error mounting relay: " & $error
# And valid filter subscription
let subscribeResponse = await client.filterSubscribe(
@@ -160,7 +160,8 @@ suite "Waku Filter - End to End":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "error mounting relay: " & $error
let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@@ -223,7 +224,8 @@ suite "Waku Filter - End to End":
pushedMsg == msg
asyncTest "Filter Client Node can't receive messages after subscribing and restarting, via Relay":
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "error mounting relay: " & $error
# Given a valid filter subscription
let subscribeResponse = await client.filterSubscribe(
diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim
index ab23921a0..dfc306de8 100644
--- a/tests/node/test_wakunode_legacy_lightpush.nim
+++ b/tests/node/test_wakunode_legacy_lightpush.nim
@@ -1,31 +1,24 @@
{.used.}
import
- std/[options, tables, sequtils, tempfiles, strutils],
+ std/[options, tempfiles],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
- chronicles,
std/strformat,
- os,
- libp2p/[peerstore, crypto/crypto]
+ libp2p/crypto/crypto
import
waku/[
waku_core,
node/peer_manager,
node/waku_node,
- waku_filter_v2,
- waku_filter_v2/client,
- waku_filter_v2/subscriptions,
waku_lightpush_legacy,
waku_lightpush_legacy/common,
- waku_lightpush_legacy/client,
waku_lightpush_legacy/protocol_metrics,
- waku_lightpush_legacy/rpc,
waku_rln_relay,
],
- ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils],
+ ../testlib/[wakucore, wakunode, testasync, futures],
../resources/payloads
suite "Waku Legacy Lightpush - End To End":
@@ -59,7 +52,9 @@ suite "Waku Legacy Lightpush - End To End":
await allFutures(server.start(), client.start())
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+
await server.mountLegacyLightpush() # without rln-relay
client.mountLegacyLightpushClient()
@@ -139,17 +134,18 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLegacyLightPush()
client.mountLegacyLightPushClient()
@@ -194,8 +190,10 @@ suite "Waku Legacy Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
- await destNode.mountRelay(@[DefaultRelayShard])
- await bridgeNode.mountRelay(@[DefaultRelayShard])
+ (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
+ (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush()
lightNode.mountLegacyLightPushClient()
@@ -206,24 +204,25 @@ suite "Waku Legacy Lightpush message delivery":
await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()])
## Given
+ const CustomPubsubTopic = "/waku/2/rs/0/1"
let message = fakeWakuMessage()
-
var completionFutRelay = newFuture[bool]()
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
check:
- topic == DefaultPubsubTopic
+ topic == CustomPubsubTopic
msg == message
completionFutRelay.complete(true)
- destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic:" & $error
# Wait for subscription to take effect
await sleepAsync(100.millis)
## When
- let res = await lightNode.legacyLightpushPublish(some(DefaultPubsubTopic), message)
+ let res = await lightNode.legacyLightpushPublish(some(CustomPubsubTopic), message)
assert res.isOk(), $res.error
## Then
diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim
index 5b0409d86..8ede3f6f2 100644
--- a/tests/node/test_wakunode_legacy_store.nim
+++ b/tests/node/test_wakunode_legacy_store.nim
@@ -14,14 +14,11 @@ import
node/peer_manager,
waku_core,
waku_store_legacy,
- waku_store_legacy/client,
waku_archive_legacy,
- waku_archive_legacy/driver/sqlite_driver,
- common/databases/db_sqlite,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
- ../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
+ ../testlib/[wakucore, wakunode, testasync, testutils]
suite "Waku Store - End to End - Sorted Archive":
var pubsubTopic {.threadvar.}: PubsubTopic
diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim
index 865fb38ff..8d48c8cb7 100644
--- a/tests/node/test_wakunode_lightpush.nim
+++ b/tests/node/test_wakunode_lightpush.nim
@@ -1,27 +1,16 @@
{.used.}
import
- std/[options, tables, sequtils, tempfiles, strutils],
+ std/[options, tempfiles],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
- chronicles,
std/strformat,
- os,
- libp2p/[peerstore, crypto/crypto]
+ libp2p/crypto/crypto
import
- waku/[
- waku_core,
- node/peer_manager,
- node/waku_node,
- waku_filter_v2,
- waku_filter_v2/client,
- waku_filter_v2/subscriptions,
- waku_lightpush,
- waku_rln_relay,
- ],
- ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils],
+ waku/[waku_core, node/peer_manager, node/waku_node, waku_lightpush, waku_rln_relay],
+ ../testlib/[wakucore, wakunode, testasync, futures],
../resources/payloads
const PublishedToOnePeer = 1
@@ -57,7 +46,8 @@ suite "Waku Lightpush - End To End":
await allFutures(server.start(), client.start())
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.mountLightpush() # without rln-relay
client.mountLightpushClient()
@@ -138,17 +128,18 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLightPush()
client.mountLightPushClient()
@@ -193,8 +184,10 @@ suite "Waku Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
- await destNode.mountRelay(@[DefaultRelayShard])
- await bridgeNode.mountRelay(@[DefaultRelayShard])
+ (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
+ (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
await bridgeNode.mountLightPush()
lightNode.mountLightPushClient()
@@ -205,6 +198,7 @@ suite "Waku Lightpush message delivery":
await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()])
## Given
+ const CustomPubsubTopic = "/waku/2/rs/0/1"
let message = fakeWakuMessage()
var completionFutRelay = newFuture[bool]()
@@ -212,17 +206,18 @@ suite "Waku Lightpush message delivery":
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
check:
- topic == DefaultPubsubTopic
+ topic == CustomPubsubTopic
msg == message
completionFutRelay.complete(true)
- destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to relay"
# Wait for subscription to take effect
await sleepAsync(100.millis)
## When
- let res = await lightNode.lightpushPublish(some(DefaultPubsubTopic), message)
+ let res = await lightNode.lightpushPublish(some(CustomPubsubTopic), message)
assert res.isOk(), $res.error
assert res.get() == 1, "Expected to relay the message to 1 node"
diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim
index edb262b0e..26837869d 100644
--- a/tests/node/test_wakunode_peer_exchange.nim
+++ b/tests/node/test_wakunode_peer_exchange.nim
@@ -13,14 +13,8 @@ import
eth/p2p/discoveryv5/enr
import
- waku/[
- waku_node,
- discovery/waku_discv5,
- waku_peer_exchange,
- node/peer_manager,
- waku_relay/protocol,
- waku_core,
- ],
+ waku/
+ [waku_node, discovery/waku_discv5, waku_peer_exchange, node/peer_manager, waku_core],
../waku_peer_exchange/utils,
../testlib/[wakucore, wakunode, testasync]
@@ -83,7 +77,7 @@ suite "Waku Peer Exchange":
# Then no peers are fetched
check:
- node.peerManager.wakuPeerStore.peers.len == 0
+ node.peerManager.switch.peerStore.peers.len == 0
res.error.status_code == SERVICE_UNAVAILABLE
res.error.status_desc == some("PeerExchange is not mounted")
@@ -98,12 +92,12 @@ suite "Waku Peer Exchange":
res.error.status_desc == some("peer_not_found_failure")
# Then no peers are fetched
- check node.peerManager.wakuPeerStore.peers.len == 0
+ check node.peerManager.switch.peerStore.peers.len == 0
asyncTest "Node succesfully exchanges px peers with faked discv5":
# Given both nodes mount peer exchange
await allFutures([node.mountPeerExchange(), node2.mountPeerExchange()])
- check node.peerManager.wakuPeerStore.peers.len == 0
+ check node.peerManager.switch.peerStore.peers.len == 0
# Mock that we discovered a node (to avoid running discv5)
var enr = enr.Record()
@@ -124,8 +118,8 @@ suite "Waku Peer Exchange":
# Check that the peer ended up in the peerstore
let rpInfo = enr.toRemotePeerInfo.get()
check:
- node.peerManager.wakuPeerStore.peers.anyIt(it.peerId == rpInfo.peerId)
- node.peerManager.wakuPeerStore.peers.anyIt(it.addrs == rpInfo.addrs)
+ node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId)
+ node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs)
suite "setPeerExchangePeer":
var node2 {.threadvar.}: WakuNode
@@ -142,7 +136,7 @@ suite "Waku Peer Exchange":
asyncTest "peer set successfully":
# Given a node with peer exchange mounted
await node.mountPeerExchange()
- let initialPeers = node.peerManager.wakuPeerStore.peers.len
+ let initialPeers = node.peerManager.switch.peerStore.peers.len
# And a valid peer info
let remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
@@ -152,12 +146,12 @@ suite "Waku Peer Exchange":
# Then the peer is added to the peer store
check:
- node.peerManager.wakuPeerStore.peers.len == (initialPeers + 1)
+ node.peerManager.switch.peerStore.peers.len == (initialPeers + 1)
asyncTest "peer exchange not mounted":
# Given a node without peer exchange mounted
check node.wakuPeerExchange == nil
- let initialPeers = node.peerManager.wakuPeerStore.peers.len
+ let initialPeers = node.peerManager.switch.peerStore.peers.len
# And a valid peer info
let invalidMultiAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
@@ -167,12 +161,12 @@ suite "Waku Peer Exchange":
# Then no peer is added to the peer store
check:
- node.peerManager.wakuPeerStore.peers.len == initialPeers
+ node.peerManager.switch.peerStore.peers.len == initialPeers
asyncTest "peer info parse error":
# Given a node with peer exchange mounted
await node.mountPeerExchange()
- let initialPeers = node.peerManager.wakuPeerStore.peers.len
+ let initialPeers = node.peerManager.switch.peerStore.peers.len
# And given a peer info with an invalid peer id
var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
@@ -183,7 +177,7 @@ suite "Waku Peer Exchange":
# Then no peer is added to the peer store
check:
- node.peerManager.wakuPeerStore.peers.len == initialPeers
+ node.peerManager.switch.peerStore.peers.len == initialPeers
suite "Waku Peer Exchange with discv5":
asyncTest "Node successfully exchanges px peers with real discv5":
@@ -286,13 +280,13 @@ suite "Waku Peer Exchange with discv5":
let
requestPeers = 1
- currentPeers = node3.peerManager.wakuPeerStore.peers.len
+ currentPeers = node3.peerManager.switch.peerStore.peers.len
let res = await node3.fetchPeerExchangePeers(1)
check res.tryGet() == 1
# Then node3 has received 1 peer from node1
check:
- node3.peerManager.wakuPeerStore.peers.len == currentPeers + requestPeers
+ node3.peerManager.switch.peerStore.peers.len == currentPeers + requestPeers
await allFutures(
[node1.stop(), node2.stop(), node3.stop(), disc1.stop(), disc2.stop()]
diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim
index 0fd80271b..88fcc827f 100644
--- a/tests/node/test_wakunode_peer_manager.nim
+++ b/tests/node/test_wakunode_peer_manager.nim
@@ -18,18 +18,15 @@ import
waku_core,
node/peer_manager,
node/waku_node,
- waku_enr/sharding,
discovery/waku_discv5,
waku_filter_v2/common,
waku_relay/protocol,
],
- ../testlib/
- [wakucore, wakunode, testasync, testutils, assertions, comparisons, futures],
+ ../testlib/[wakucore, wakunode, testasync, testutils, comparisons],
../waku_enr/utils,
../waku_archive/archive_utils,
../waku_discv5/utils,
- ./peer_manager/peer_store/utils,
- ./utils
+ ./peer_manager/peer_store/utils
const DEFAULT_PROTOCOLS: seq[string] =
@["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"]
@@ -45,9 +42,9 @@ suite "Peer Manager":
var
server {.threadvar.}: WakuNode
- serverPeerStore {.threadvar.}: WakuPeerStore
+ serverPeerStore {.threadvar.}: PeerStore
client {.threadvar.}: WakuNode
- clientPeerStore {.threadvar.}: WakuPeerStore
+ clientPeerStore {.threadvar.}: PeerStore
var
serverRemotePeerInfo {.threadvar.}: RemotePeerInfo
@@ -64,9 +61,9 @@ suite "Peer Manager":
clientKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, Port(3000))
- serverPeerStore = server.peerManager.wakuPeerStore
+ serverPeerStore = server.peerManager.switch.peerStore
client = newTestWakuNode(clientKey, listenIp, Port(3001))
- clientPeerStore = client.peerManager.wakuPeerStore
+ clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@@ -140,7 +137,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as CannotConnect
- client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
+ client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@@ -177,7 +174,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as having 1 failed connection
- client.peerManager.wakuPeerStore[NumberFailedConnBook].book[serverPeerId] = 1
+ client.peerManager.switch.peerStore[NumberFailedConnBook].book[serverPeerId] = 1
# When pruning the client's store
client.peerManager.prunePeerStore()
@@ -196,7 +193,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as not connected
- client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
+ client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@@ -220,7 +217,7 @@ suite "Peer Manager":
# Given the server is marked as not connected
# (There's only one shard in the ENR so avg shards will be the same as the shard count; hence it will be purged.)
- client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
+ client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@@ -311,7 +308,8 @@ suite "Peer Manager":
asyncTest "Peer Protocol Support Verification (Before Connection)":
# Given the server has mounted some Waku protocols
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.mountFilter()
# When connecting to the server
@@ -338,7 +336,8 @@ suite "Peer Manager":
server2RemotePeerInfo = server2.switch.peerInfo.toRemotePeerInfo()
server2PeerId = server2RemotePeerInfo.peerId
- await server2.mountRelay()
+ (await server2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# When connecting to both servers
await client.connectToNodes(@[serverRemotePeerInfo, server2RemotePeerInfo])
@@ -536,8 +535,10 @@ suite "Peer Manager":
suite "Peer Connectivity States":
asyncTest "State Tracking & Transition":
# Given two correctly initialised nodes, but not connected
- await server.mountRelay()
- await client.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await client.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# Then their connectedness should be NotConnected
check:
@@ -590,8 +591,10 @@ suite "Peer Manager":
suite "Automatic Reconnection":
asyncTest "Automatic Reconnection Implementation":
# Given two correctly initialised nodes, that are available for reconnection
- await server.mountRelay()
- await client.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await client.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await client.connectToNodes(@[serverRemotePeerInfo])
waitActive:
@@ -714,8 +717,8 @@ suite "Persistence Check":
client = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = clientPeerStorage
)
- serverPeerStore = server.peerManager.wakuPeerStore
- clientPeerStore = client.peerManager.wakuPeerStore
+ serverPeerStore = server.peerManager.switch.peerStore
+ clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@@ -731,7 +734,7 @@ suite "Persistence Check":
newClient = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = newClientPeerStorage
)
- newClientPeerStore = newClient.peerManager.wakuPeerStore
+ newClientPeerStore = newClient.peerManager.switch.peerStore
await newClient.start()
@@ -756,8 +759,8 @@ suite "Persistence Check":
client = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = clientPeerStorage
)
- serverPeerStore = server.peerManager.wakuPeerStore
- clientPeerStore = client.peerManager.wakuPeerStore
+ serverPeerStore = server.peerManager.switch.peerStore
+ clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@@ -776,8 +779,8 @@ suite "Persistence Check":
clientKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
client = newTestWakuNode(clientKey, listenIp, listenPort)
- serverPeerStore = server.peerManager.wakuPeerStore
- clientPeerStore = client.peerManager.wakuPeerStore
+ serverPeerStore = server.peerManager.switch.peerStore
+ clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@@ -792,13 +795,13 @@ suite "Mount Order":
var
client {.threadvar.}: WakuNode
clientRemotePeerInfo {.threadvar.}: RemotePeerInfo
- clientPeerStore {.threadvar.}: WakuPeerStore
+ clientPeerStore {.threadvar.}: PeerStore
asyncSetup:
let clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, listenIp, listenPort)
- clientPeerStore = client.peerManager.wakuPeerStore
+ clientPeerStore = client.peerManager.switch.peerStore
await client.start()
@@ -813,7 +816,8 @@ suite "Mount Order":
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.start()
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
@@ -837,7 +841,8 @@ suite "Mount Order":
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
@@ -862,7 +867,8 @@ suite "Mount Order":
server = newTestWakuNode(serverKey, listenIp, listenPort)
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
@@ -889,7 +895,8 @@ suite "Mount Order":
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# When connecting to the server
await client.connectToNodes(@[serverRemotePeerInfo])
@@ -913,7 +920,8 @@ suite "Mount Order":
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
await server.start()
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# When connecting to the server
await client.connectToNodes(@[serverRemotePeerInfo])
@@ -935,7 +943,8 @@ suite "Mount Order":
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
- await server.mountRelay()
+ (await server.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await server.start()
# When connecting to the server
diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim
index 0bf608d12..afc282d50 100644
--- a/tests/node/test_wakunode_relay_rln.nim
+++ b/tests/node/test_wakunode_relay_rln.nim
@@ -83,16 +83,15 @@ proc getWakuRlnConfigOnChain*(
ethClientAddress: Option[string] = none(string),
): WakuRlnConfig =
return WakuRlnConfig(
- rlnRelayDynamic: true,
- rlnRelayCredIndex: some(credIndex),
- rlnRelayEthContractAddress: rlnRelayEthContractAddress,
- rlnRelayEthClientAddress: ethClientAddress.get(EthClient),
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
- rlnEpochSizeSec: 1,
+ dynamic: true,
+ credIndex: some(credIndex),
+ ethContractAddress: rlnRelayEthContractAddress,
+ ethClientAddress: ethClientAddress.get(EthClient),
+ treePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
+ epochSizeSec: 1,
onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler),
# If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership"
- rlnRelayCredPath: keystorePath,
- rlnRelayCredPassword: password,
+ creds: some(RlnRelayCreds(path: keystorePath, password: password)),
)
proc setupRelayWithOnChainRln*(
@@ -227,13 +226,13 @@ suite "Waku RlnRelay - End to End - Static":
let contractAddress = await uploadRLNContract(EthClient)
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: true,
- rlnRelayCredIndex: some(0.uint),
- rlnRelayUserMessageLimit: 111,
- rlnRelayTreepath: genTempPath("rln_tree", "wakunode_0"),
- rlnRelayEthClientAddress: EthClient,
- rlnRelayEthContractAddress: $contractAddress,
- rlnRelayChainId: 1337,
+ dynamic: true,
+ credIndex: some(0.uint),
+ userMessageLimit: 111,
+ treepath: genTempPath("rln_tree", "wakunode_0"),
+ ethClientAddress: EthClient,
+ ethContractAddress: $contractAddress,
+ chainId: 1337,
onFatalErrorAction: proc(errStr: string) =
raiseAssert errStr
,
@@ -263,7 +262,9 @@ suite "Waku RlnRelay - End to End - Static":
completionFut.complete((topic, msg))
let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic)
- server.subscribe(subscriptionEvent, some(relayHandler))
+ server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic"
+
await sleepAsync(FUTURE_TIMEOUT)
# Generate Messages
@@ -357,7 +358,9 @@ suite "Waku RlnRelay - End to End - Static":
completionFut.complete((topic, msg))
let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic)
- server.subscribe(subscriptionEvent, some(relayHandler))
+ server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic"
+
await sleepAsync(FUTURE_TIMEOUT)
# Generate Messages
diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim
index 49c24c6d8..622322d92 100644
--- a/tests/node/test_wakunode_store.nim
+++ b/tests/node/test_wakunode_store.nim
@@ -1,7 +1,7 @@
{.used.}
import
- std/[options, sequtils, algorithm, sets],
+ std/[options, sequtils, sets],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
@@ -15,14 +15,11 @@ import
waku_core,
waku_core/message/digest,
waku_store,
- waku_store/client,
waku_archive,
- waku_archive/driver/sqlite_driver,
- common/databases/db_sqlite,
],
../waku_store/store_utils,
../waku_archive/archive_utils,
- ../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
+ ../testlib/[wakucore, wakunode, testasync, testutils]
suite "Waku Store - End to End - Sorted Archive":
var pubsubTopic {.threadvar.}: PubsubTopic
diff --git a/tests/test_message_cache.nim b/tests/test_message_cache.nim
index b6bb91b86..cd2e882c1 100644
--- a/tests/test_message_cache.nim
+++ b/tests/test_message_cache.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/[sets, random], stew/[results, byteutils], testutils/unittests
+import std/[sets, random], results, stew/byteutils, testutils/unittests
import waku/waku_core, waku/waku_api/message_cache, ./testlib/wakucore
randomize()
diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim
index 46edfc447..c38ca23b8 100644
--- a/tests/test_peer_manager.nim
+++ b/tests/test_peer_manager.nim
@@ -1,7 +1,7 @@
{.used.}
import
- std/[options, sequtils, times, sugar, net],
+ std/[sequtils, times, sugar, net],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
@@ -27,7 +27,6 @@ import
waku_relay/protocol,
waku_filter_v2/common,
waku_store/common,
- waku_lightpush/common,
waku_peer_exchange,
waku_metadata,
incentivization/reputation_manager,
@@ -51,10 +50,10 @@ procSuite "Peer Manager":
check:
connOk == true
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected
asyncTest "dialPeer() works":
@@ -81,13 +80,13 @@ procSuite "Peer Manager":
# Check that node2 is being managed in node1
check:
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)
# Check connectedness
check:
- nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected
await allFutures(nodes.mapIt(it.stop()))
@@ -142,12 +141,12 @@ procSuite "Peer Manager":
# Check peers were successfully added to peer manager
check:
- node.peerManager.wakuPeerStore.peers().len == 2
- node.peerManager.wakuPeerStore.peers(WakuFilterSubscribeCodec).allIt(
+ node.peerManager.switch.peerStore.peers().len == 2
+ node.peerManager.switch.peerStore.peers(WakuFilterSubscribeCodec).allIt(
it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and
it.protocols.contains(WakuFilterSubscribeCodec)
)
- node.peerManager.wakuPeerStore.peers(WakuStoreCodec).allIt(
+ node.peerManager.switch.peerStore.peers(WakuStoreCodec).allIt(
it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and
it.protocols.contains(WakuStoreCodec)
)
@@ -167,7 +166,7 @@ procSuite "Peer Manager":
nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo())
check:
# No information about node2's connectedness
- nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
NotConnected
# Failed connection
@@ -184,7 +183,7 @@ procSuite "Peer Manager":
check:
# Cannot connect to node2
- nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect
# Successful connection
@@ -195,14 +194,14 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
- nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connected
# Stop node. Gracefully disconnect from all peers.
await nodes[0].stop()
check:
# Not currently connected to node2, but had recent, successful connection.
- nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
CanConnect
await nodes[1].stop()
@@ -233,12 +232,13 @@ procSuite "Peer Manager":
let conn1Ok = await nodes[0].peerManager.connectPeer(nonExistentPeer)
check:
# Cannot connect to node2
- nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
+ nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][nonExistentPeer.peerId] ==
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][nonExistentPeer.peerId] ==
CannotConnect
- nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nonExistentPeer.peerId] ==
- 1
+ nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
+ nonExistentPeer.peerId
+ ] == 1
# Connection attempt failed
conn1Ok == false
@@ -254,14 +254,17 @@ procSuite "Peer Manager":
nodes[0].peerManager.canBeConnected(nodes[1].peerInfo.peerId) == true
# After a successful connection, the number of failed connections is reset
- nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] =
- 4
+
+ nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
+ nodes[1].peerInfo.peerId
+ ] = 4
let conn2Ok =
await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())
check:
conn2Ok == true
- nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] ==
- 0
+ nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
+ nodes[1].peerInfo.peerId
+ ] == 0
await allFutures(nodes.mapIt(it.stop()))
@@ -280,8 +283,10 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
- await node1.mountRelay()
- await node2.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let peerInfo2 = node2.switch.peerInfo
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
@@ -291,7 +296,7 @@ procSuite "Peer Manager":
assert is12Connected == true, "Node 1 and 2 not connected"
check:
- node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
+ node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] ==
remotePeerInfo2.addrs
# wait for the peer store update
@@ -299,9 +304,9 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
- node1.peerManager.wakuPeerStore.peers().len == 1
- node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node1.peerManager.switch.peerStore.peers().len == 1
+ node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@@ -317,11 +322,12 @@ procSuite "Peer Manager":
check:
# Node2 has been loaded after "restart", but we have not yet reconnected
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
- await node3.mountRelay()
+ (await node3.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node3.peerManager.connectToRelayPeers()
@@ -329,9 +335,9 @@ procSuite "Peer Manager":
check:
# Reconnected to node2 after "restart"
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@@ -350,8 +356,10 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
- await node1.mountRelay()
- await node2.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let peerInfo2 = node2.switch.peerInfo
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
@@ -361,7 +369,7 @@ procSuite "Peer Manager":
assert is12Connected == true, "Node 1 and 2 not connected"
check:
- node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
+ node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] ==
remotePeerInfo2.addrs
# wait for the peer store update
@@ -369,9 +377,9 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
- node1.peerManager.wakuPeerStore.peers().len == 1
- node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node1.peerManager.switch.peerStore.peers().len == 1
+ node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@@ -387,11 +395,12 @@ procSuite "Peer Manager":
check:
# Node2 has been loaded after "restart", but we have not yet reconnected
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
- await node3.mountRelay()
+ (await node3.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node3.peerManager.manageRelayPeers()
@@ -399,9 +408,9 @@ procSuite "Peer Manager":
check:
# Reconnected to node2 after "restart"
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@@ -480,21 +489,23 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
node1.wakuRelay.codec = betaCodec
- await node2.mountRelay()
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
node2.wakuRelay.codec = betaCodec
require:
(await node1.peerManager.connectPeer(peerInfo2.toRemotePeerInfo())) == true
check:
# Currently connected to node2
- node1.peerManager.wakuPeerStore.peers().len == 1
- node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node1.peerManager.wakuPeerStore.peers().anyIt(
+ node1.peerManager.switch.peerStore.peers().len == 1
+ node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node1.peerManager.switch.peerStore.peers().anyIt(
it.protocols.contains(node2.wakuRelay.codec)
)
- node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@@ -504,27 +515,30 @@ procSuite "Peer Manager":
peerStorage = storage,
)
- await node3.mountRelay()
+ (await node3.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
node3.wakuRelay.codec = stableCodec
check:
# Node 2 and 3 have differing codecs
node2.wakuRelay.codec == betaCodec
node3.wakuRelay.codec == stableCodec
# Node2 has been loaded after "restart", but we have not yet reconnected
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec))
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
await node3.start() # This should trigger a reconnect
check:
# Reconnected to node2 after "restart"
- node3.peerManager.wakuPeerStore.peers().len == 1
- node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
- node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
- node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(stableCodec))
- node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
+ node3.peerManager.switch.peerStore.peers().len == 1
+ node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
+ node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec))
+ node3.peerManager.switch.peerStore.peers().anyIt(
+ it.protocols.contains(stableCodec)
+ )
+ node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@@ -561,38 +575,38 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
- nodes[0].peerManager.wakuPeerStore.peers().len == 3
+ nodes[0].peerManager.switch.peerStore.peers().len == 3
# All peer ids are correct
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
@@ -631,38 +645,38 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
- nodes[0].peerManager.wakuPeerStore.peers().len == 3
+ nodes[0].peerManager.switch.peerStore.peers().len == 3
# All peer ids are correct
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
@@ -691,66 +705,72 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
- nodes[0].peerManager.wakuPeerStore.peers().len == 3
+ nodes[0].peerManager.switch.peerStore.peers().len == 3
# Inbound/Outbound number of peers match
- nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 3
- nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 0
- nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
- nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
- nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
- nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
- nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
- nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
+ nodes[0].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 3
+ nodes[0].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 0
+ nodes[1].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
+ nodes[1].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
+ nodes[2].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
+ nodes[2].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
+ nodes[3].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
+ nodes[3].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
# All peer ids are correct
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
- nodes[0].peerManager.wakuPeerStore.peers().anyIt(
+ nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
- nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
+ nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
- nodes[0].peerManager.wakuPeerStore[ConnectionBook][
+ nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
# All peers are Inbound in peer 0
- nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] ==
- Inbound
- nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] ==
- Inbound
- nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] ==
- Inbound
+ nodes[0].peerManager.switch.peerStore[DirectionBook][
+ nodes[1].switch.peerInfo.peerId
+ ] == Inbound
+ nodes[0].peerManager.switch.peerStore[DirectionBook][
+ nodes[2].switch.peerInfo.peerId
+ ] == Inbound
+ nodes[0].peerManager.switch.peerStore[DirectionBook][
+ nodes[3].switch.peerInfo.peerId
+ ] == Inbound
# All peers have an Outbound connection with peer 0
- nodes[1].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
- Outbound
- nodes[2].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
- Outbound
- nodes[3].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
- Outbound
+ nodes[1].peerManager.switch.peerStore[DirectionBook][
+ nodes[0].switch.peerInfo.peerId
+ ] == Outbound
+ nodes[2].peerManager.switch.peerStore[DirectionBook][
+ nodes[0].switch.peerInfo.peerId
+ ] == Outbound
+ nodes[3].peerManager.switch.peerStore[DirectionBook][
+ nodes[0].switch.peerInfo.peerId
+ ] == Outbound
await allFutures(nodes.mapIt(it.stop()))
@@ -779,12 +799,13 @@ procSuite "Peer Manager":
# all peers are stored in the peerstore
check:
- node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[0].peerId)
- node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[1].peerId)
- node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[2].peerId)
+ node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[0].peerId)
+ node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[1].peerId)
+ node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[2].peerId)
# but the relay peer is not
- node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[3].peerId) == false
+ node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[3].peerId) ==
+ false
# all service peers are added to its service slot
check:
@@ -901,8 +922,8 @@ procSuite "Peer Manager":
peers.len == 3
# Add a peer[0] to the peerstore
- pm.wakuPeerStore[AddressBook][peers[0].peerId] = peers[0].addrs
- pm.wakuPeerStore[ProtoBook][peers[0].peerId] =
+ pm.switch.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs
+ pm.switch.peerStore[ProtoBook][peers[0].peerId] =
@[WakuRelayCodec, WakuStoreCodec, WakuFilterSubscribeCodec, WakuLightPushCodec]
# When no service peers, we get one from the peerstore
@@ -998,44 +1019,44 @@ procSuite "Peer Manager":
# Check that we have 30 peers in the peerstore
check:
- pm.wakuPeerStore.peers.len == 30
+ pm.switch.peerStore.peers.len == 30
# fake that some peers failed to connected
- pm.wakuPeerStore[NumberFailedConnBook][peers[0].peerId] = 2
- pm.wakuPeerStore[NumberFailedConnBook][peers[1].peerId] = 2
- pm.wakuPeerStore[NumberFailedConnBook][peers[2].peerId] = 2
- pm.wakuPeerStore[NumberFailedConnBook][peers[3].peerId] = 2
- pm.wakuPeerStore[NumberFailedConnBook][peers[4].peerId] = 2
+ pm.switch.peerStore[NumberFailedConnBook][peers[0].peerId] = 2
+ pm.switch.peerStore[NumberFailedConnBook][peers[1].peerId] = 2
+ pm.switch.peerStore[NumberFailedConnBook][peers[2].peerId] = 2
+ pm.switch.peerStore[NumberFailedConnBook][peers[3].peerId] = 2
+ pm.switch.peerStore[NumberFailedConnBook][peers[4].peerId] = 2
# fake that some peers are connected
- pm.wakuPeerStore[ConnectionBook][peers[5].peerId] = Connected
- pm.wakuPeerStore[ConnectionBook][peers[8].peerId] = Connected
- pm.wakuPeerStore[ConnectionBook][peers[15].peerId] = Connected
- pm.wakuPeerStore[ConnectionBook][peers[18].peerId] = Connected
- pm.wakuPeerStore[ConnectionBook][peers[24].peerId] = Connected
- pm.wakuPeerStore[ConnectionBook][peers[29].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[5].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[8].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[15].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[18].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[24].peerId] = Connected
+ pm.switch.peerStore[ConnectionBook][peers[29].peerId] = Connected
# Prune the peerstore (current=30, target=25)
pm.prunePeerStore()
check:
# ensure peerstore was pruned
- pm.wakuPeerStore.peers.len == 25
+ pm.switch.peerStore.peers.len == 25
# ensure connected peers were not pruned
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[5].peerId)
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[8].peerId)
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[15].peerId)
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[18].peerId)
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[24].peerId)
- pm.wakuPeerStore.peers.anyIt(it.peerId == peers[29].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[5].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[8].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[15].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[18].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[24].peerId)
+ pm.switch.peerStore.peers.anyIt(it.peerId == peers[29].peerId)
# ensure peers that failed were the first to be pruned
- not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[0].peerId)
- not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[1].peerId)
- not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[2].peerId)
- not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[3].peerId)
- not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[4].peerId)
+ not pm.switch.peerStore.peers.anyIt(it.peerId == peers[0].peerId)
+ not pm.switch.peerStore.peers.anyIt(it.peerId == peers[1].peerId)
+ not pm.switch.peerStore.peers.anyIt(it.peerId == peers[2].peerId)
+ not pm.switch.peerStore.peers.anyIt(it.peerId == peers[3].peerId)
+ not pm.switch.peerStore.peers.anyIt(it.peerId == peers[4].peerId)
asyncTest "canBeConnected() returns correct value":
let pm = PeerManager.new(
@@ -1061,8 +1082,8 @@ procSuite "Peer Manager":
pm.canBeConnected(p1) == true
# peer with ONE error that just failed
- pm.wakuPeerStore[NumberFailedConnBook][p1] = 1
- pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
+ pm.switch.peerStore[NumberFailedConnBook][p1] = 1
+ pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
# we cant connect right now
check:
pm.canBeConnected(p1) == false
@@ -1073,8 +1094,8 @@ procSuite "Peer Manager":
pm.canBeConnected(p1) == true
# peer with TWO errors, we can connect until 2 seconds have passed
- pm.wakuPeerStore[NumberFailedConnBook][p1] = 2
- pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
+ pm.switch.peerStore[NumberFailedConnBook][p1] = 2
+ pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
# cant be connected after 1 second
await sleepAsync(chronos.milliseconds(1000))
@@ -1171,6 +1192,23 @@ procSuite "Peer Manager":
check:
nodes[0].peerManager.ipTable["127.0.0.1"].len == 1
nodes[0].peerManager.switch.connManager.getConnections().len == 1
- nodes[0].peerManager.wakuPeerStore.peers().len == 1
+ nodes[0].peerManager.switch.peerStore.peers().len == 1
await allFutures(nodes.mapIt(it.stop()))
+
+ asyncTest "Retrieve peer that mounted peer exchange":
+ let
+ node1 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55048))
+ node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55023))
+
+ await allFutures(node1.start(), node2.start())
+ await allFutures(node1.mountRelay(), node2.mountRelay())
+ await allFutures(node1.mountPeerExchange(), node2.mountPeerExchange())
+
+ await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
+
+ var r = node1.peerManager.selectPeer(WakuRelayCodec)
+ assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec"
+
+ r = node1.peerManager.selectPeer(WakuPeerExchangeCodec)
+ assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec"
diff --git a/tests/test_peer_store_extended.nim b/tests/test_peer_store_extended.nim
index ef03fc69a..16926c7c2 100644
--- a/tests/test_peer_store_extended.nim
+++ b/tests/test_peer_store_extended.nim
@@ -9,12 +9,8 @@ import
libp2p/multiaddress,
testutils/unittests
import
- waku/[
- node/peer_manager/peer_manager,
- node/peer_manager/waku_peer_store,
- waku_node,
- waku_core/peers,
- ],
+ waku/
+ [node/peer_manager/peer_manager, node/peer_manager/waku_peer_store, waku_core/peers],
./testlib/wakucore
suite "Extended nim-libp2p Peer Store":
@@ -25,7 +21,7 @@ suite "Extended nim-libp2p Peer Store":
setup:
# Setup a nim-libp2p peerstore with some peers
- let peerStore = WakuPeerStore.new(nil, capacity = 50)
+ let peerStore = PeerStore.new(nil, capacity = 50)
var p1, p2, p3, p4, p5, p6: PeerId
# create five peers basePeerId + [1-5]
@@ -320,7 +316,7 @@ suite "Extended nim-libp2p Peer Store":
test "del() successfully deletes waku custom books":
# Given
- let peerStore = WakuPeerStore.new(nil, capacity = 5)
+ let peerStore = PeerStore.new(nil, capacity = 5)
var p1: PeerId
require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW1")
diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim
index 0be3c9193..a729ff1a7 100644
--- a/tests/test_relay_peer_exchange.nim
+++ b/tests/test_relay_peer_exchange.nim
@@ -4,10 +4,8 @@ import
std/[sequtils, options],
stew/shims/net,
testutils/unittests,
- chronicles,
chronos,
libp2p/peerid,
- libp2p/crypto/crypto,
libp2p/protocols/pubsub/gossipsub
import waku/waku_core, waku/waku_node, ./testlib/wakucore, ./testlib/wakunode
@@ -25,8 +23,10 @@ procSuite "Relay (GossipSub) Peer Exchange":
newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true)
# When both client and server mount relay without a handler
- await node1.mountRelay(@[DefaultRelayShard])
- await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))
+ (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr:
+ assert false, "Failed to mount relay"
# Then the relays are mounted without a handler
check:
@@ -75,9 +75,12 @@ procSuite "Relay (GossipSub) Peer Exchange":
peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler
# Givem the nodes mount relay with a peer exchange handler
- await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))
- await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))
- await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))
+ (await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr:
+ assert false, "Failed to mount relay"
# Ensure that node1 prunes all peers after the first connection
node1.wakuRelay.parameters.dHigh = 1
diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim
index 228fa5542..7028b20eb 100644
--- a/tests/test_waku_dnsdisc.nim
+++ b/tests/test_waku_dnsdisc.nim
@@ -3,7 +3,8 @@
import
std/[sequtils, tables],
stew/shims/net,
- stew/[base32, results],
+ results,
+ stew/base32,
testutils/unittests,
chronicles,
chronos,
@@ -36,9 +37,12 @@ suite "Waku DNS Discovery":
node3 = newTestWakuNode(nodeKey3, bindIp, Port(63503))
enr3 = node3.enr
- await node1.mountRelay()
- await node2.mountRelay()
- await node3.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node3.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await allFutures([node1.start(), node2.start(), node3.start()])
# Build and sign tree
@@ -74,7 +78,8 @@ suite "Waku DNS Discovery":
nodeKey4 = generateSecp256k1Key()
node4 = newTestWakuNode(nodeKey4, bindIp, Port(63504))
- await node4.mountRelay()
+ (await node4.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node4.start()
var wakuDnsDisc = WakuDnsDiscovery.init(location, resolver).get()
@@ -94,20 +99,20 @@ suite "Waku DNS Discovery":
check:
# We have successfully connected to all discovered nodes
- node4.peerManager.wakuPeerStore.peers().anyIt(
+ node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node1.switch.peerInfo.peerId
)
- node4.peerManager.wakuPeerStore.connectedness(node1.switch.peerInfo.peerId) ==
+ node4.peerManager.switch.peerStore.connectedness(node1.switch.peerInfo.peerId) ==
Connected
- node4.peerManager.wakuPeerStore.peers().anyIt(
+ node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node2.switch.peerInfo.peerId
)
- node4.peerManager.wakuPeerStore.connectedness(node2.switch.peerInfo.peerId) ==
+ node4.peerManager.switch.peerStore.connectedness(node2.switch.peerInfo.peerId) ==
Connected
- node4.peerManager.wakuPeerStore.peers().anyIt(
+ node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node3.switch.peerInfo.peerId
)
- node4.peerManager.wakuPeerStore.connectedness(node3.switch.peerInfo.peerId) ==
+ node4.peerManager.switch.peerStore.connectedness(node3.switch.peerInfo.peerId) ==
Connected
await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()])
diff --git a/tests/test_waku_enr.nim b/tests/test_waku_enr.nim
index b6571b09f..2ffff5e57 100644
--- a/tests/test_waku_enr.nim
+++ b/tests/test_waku_enr.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/[options, sequtils], stew/results, testutils/unittests
+import std/[options, sequtils], results, testutils/unittests
import waku/waku_core, waku/waku_enr, ./testlib/wakucore
suite "Waku ENR - Capabilities bitfield":
diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim
index aebee13dc..d4d05ad97 100644
--- a/tests/test_waku_keepalive.nim
+++ b/tests/test_waku_keepalive.nim
@@ -1,7 +1,6 @@
{.used.}
import
- std/options,
stew/shims/net as stewNet,
testutils/unittests,
chronos,
@@ -32,11 +31,13 @@ suite "Waku Keepalive":
completionFut.complete(true)
await node1.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node1.mountLibp2pPing()
await node2.start()
- await node2.mountRelay()
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let pingProto = Ping.new(handler = pingHandler)
await pingProto.start()
diff --git a/tests/test_waku_netconfig.nim b/tests/test_waku_netconfig.nim
index 4d5a2df7d..d2c9cc780 100644
--- a/tests/test_waku_netconfig.nim
+++ b/tests/test_waku_netconfig.nim
@@ -4,7 +4,7 @@ import chronos, confutils/toml/std/net, libp2p/multiaddress, testutils/unittests
import ./testlib/wakunode, waku/waku_enr/capabilities
-include waku/node/config
+include waku/node/net_config
proc defaultTestWakuFlags(): CapabilitiesBitfield =
CapabilitiesBitfield.init(
@@ -13,19 +13,27 @@ proc defaultTestWakuFlags(): CapabilitiesBitfield =
suite "Waku NetConfig":
asyncTest "Create NetConfig with default values":
- let conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
let wakuFlags = defaultTestWakuFlags()
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extIp = none(IpAddress),
extPort = none(Port),
extMultiAddrs = @[],
- wsBindPort = conf.websocketPort,
- wsEnabled = conf.websocketSupport,
- wssEnabled = conf.websocketSecureSupport,
+ wsBindPort =
+ if conf.webSocketConf.isSome():
+ some(conf.webSocketConf.get().port)
+ else:
+ none(Port),
+ wsEnabled = conf.webSocketConf.isSome(),
+ wssEnabled =
+ if conf.webSocketConf.isSome():
+ conf.webSocketConf.get().secureConf.isSome()
+ else:
+ false,
dns4DomainName = none(string),
discv5UdpPort = none(Port),
wakuFlags = some(wakuFlags),
@@ -35,10 +43,11 @@ suite "Waku NetConfig":
netConfigRes.isOk()
asyncTest "AnnouncedAddresses contains only bind address when no external addresses are provided":
- let conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
- let netConfigRes =
- NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)
+ let netConfigRes = NetConfig.init(
+ bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
+ )
assert netConfigRes.isOk(), $netConfigRes.error
@@ -47,17 +56,19 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 1 # Only bind address should be present
netConfig.announcedAddresses[0] ==
- formatListenAddress(ip4TcpEndPoint(conf.listenAddress, conf.tcpPort))
+ formatListenAddress(
+ ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort)
+ )
asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@@ -72,13 +83,13 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses contains dns4DomainName if provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@@ -93,14 +104,14 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses includes extMultiAddrs when provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
)
@@ -114,14 +125,14 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses uses dns4DomainName over extIp when both are provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@@ -137,12 +148,12 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses includes WebSocket addresses when enabled":
var
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
wssEnabled = false
var netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@@ -153,16 +164,18 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
- netConfig.announcedAddresses[1] ==
- (ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
+ netConfig.announcedAddresses[1] == (
+ ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) &
+ wsFlag(wssEnabled)
+ )
## Now try the same for the case of wssEnabled = true
wssEnabled = true
netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@@ -173,19 +186,21 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
- netConfig.announcedAddresses[1] ==
- (ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
+ netConfig.announcedAddresses[1] == (
+ ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) &
+ wsFlag(wssEnabled)
+ )
asyncTest "Announced WebSocket address contains external IP if provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
wsEnabled = true,
@@ -199,18 +214,18 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # External address + wsHostAddress
netConfig.announcedAddresses[1] ==
- (ip4TcpEndPoint(extIp, conf.websocketPort) & wsFlag(wssEnabled))
+ (ip4TcpEndPoint(extIp, conf.websocketConf.get().port) & wsFlag(wssEnabled))
asyncTest "Announced WebSocket address contains dns4DomainName if provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
wsEnabled = true,
@@ -223,20 +238,22 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
- netConfig.announcedAddresses[1] ==
- (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
+ netConfig.announcedAddresses[1] == (
+ dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) &
+ wsFlag(wssEnabled)
+ )
asyncTest "Announced WebSocket address contains dns4DomainName if provided alongside extIp":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@@ -251,32 +268,35 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)
- netConfig.announcedAddresses[1] ==
- (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
+ netConfig.announcedAddresses[1] == (
+ dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) &
+ wsFlag(wssEnabled)
+ )
asyncTest "ENR is set with bindIp/Port if no extIp/Port are provided":
- let conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
- let netConfigRes =
- NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)
+ let netConfigRes = NetConfig.init(
+ bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
+ )
assert netConfigRes.isOk(), $netConfigRes.error
let netConfig = netConfigRes.get()
check:
- netConfig.enrIp.get() == conf.listenAddress
- netConfig.enrPort.get() == conf.tcpPort
+ netConfig.enrIp.get() == conf.networkConf.p2pListenAddress
+ netConfig.enrPort.get() == conf.networkConf.p2pTcpPort
asyncTest "ENR is set with extIp/Port if provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@@ -291,13 +311,13 @@ suite "Waku NetConfig":
asyncTest "ENR is set with dns4DomainName if provided":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@@ -311,7 +331,7 @@ suite "Waku NetConfig":
asyncTest "wsHostAddress is not announced if a WS/WSS address is provided in extMultiAddrs":
var
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extAddIp = parseIpAddress("1.2.3.4")
extAddPort = Port(1234)
wsEnabled = true
@@ -319,8 +339,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
var netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wsEnabled = wsEnabled,
)
@@ -338,8 +358,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wssEnabled = wssEnabled,
)
@@ -354,14 +374,14 @@ suite "Waku NetConfig":
asyncTest "Only extMultiAddrs are published when enabling extMultiAddrsOnly flag":
let
- conf = defaultTestWakuNodeConf()
+ conf = defaultTestWakuConf()
extAddIp = parseIpAddress("1.2.3.4")
extAddPort = Port(1234)
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
let netConfigRes = NetConfig.init(
- bindIp = conf.listenAddress,
- bindPort = conf.tcpPort,
+ bindIp = conf.networkConf.p2pListenAddress,
+ bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
extMultiAddrsOnly = true,
)
diff --git a/tests/test_waku_noise_sessions.nim b/tests/test_waku_noise_sessions.nim
index a02407e63..543653982 100644
--- a/tests/test_waku_noise_sessions.nim
+++ b/tests/test_waku_noise_sessions.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/tables, stew/[results, byteutils], testutils/unittests
+import std/tables, results, stew/byteutils, testutils/unittests
import
waku/[
common/protobuf,
diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim
index 2213b7f8e..51dd999b0 100644
--- a/tests/test_wakunode.nim
+++ b/tests/test_wakunode.nim
@@ -17,9 +17,7 @@ import
libp2p/nameresolving/mockresolver,
eth/p2p/discoveryv5/enr
import
- waku/[waku_core, waku_node, node/peer_manager, waku_relay, waku_peer_exchange],
- ./testlib/wakucore,
- ./testlib/wakunode
+ waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode
suite "WakuNode":
asyncTest "Protocol matcher works as expected":
@@ -36,13 +34,15 @@ suite "WakuNode":
# Setup node 1 with stable codec "/vac/waku/relay/2.0.0"
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
node1.wakuRelay.codec = "/vac/waku/relay/2.0.0"
# Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2"
check:
@@ -63,7 +63,14 @@ suite "WakuNode":
msg.payload == payload
completionFut.complete(true)
- node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node2.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic"
await sleepAsync(2000.millis)
var res = await node1.publish(some($shard), message)
@@ -94,8 +101,10 @@ suite "WakuNode":
node2PeerId = $(node2.switch.peerInfo.peerId)
node2Dns4Addr = "/dns4/localhost/tcp/61022/p2p/" & node2PeerId
- await node1.mountRelay()
- await node2.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await allFutures([node1.start(), node2.start()])
@@ -119,7 +128,8 @@ suite "WakuNode":
# Initialize and start node1
await node1.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
# Create an array to hold the other nodes
var otherNodes: seq[WakuNode] = @[]
@@ -131,7 +141,8 @@ suite "WakuNode":
port = 60012 + i * 2 # Ensure unique ports for each node
node = newTestWakuNode(nodeKey, parseIpAddress("127.0.0.1"), Port(port))
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
otherNodes.add(node)
# Connect all other nodes to node1
@@ -298,10 +309,12 @@ suite "WakuNode":
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61016))
await node1.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay()
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])
@@ -339,10 +352,12 @@ suite "WakuNode":
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61020))
await node1.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay()
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])
diff --git a/tests/testlib/simple_mock.nim b/tests/testlib/simple_mock.nim
index 234647d33..91ec19261 100644
--- a/tests/testlib/simple_mock.nim
+++ b/tests/testlib/simple_mock.nim
@@ -9,9 +9,9 @@ type Instr {.union.} = object
proc mockImpl*(target, replacement: pointer) =
# YOLO who needs alignment
#doAssert (cast[ByteAddress](target) and ByteAddress(0x07)) == 0
- var page = cast[pointer](cast[ByteAddress](target) and (not 0xfff))
+ var page = cast[pointer](cast[uint](target) and (not 0xfff))
doAssert mprotect(page, 4096, PROT_WRITE or PROT_EXEC) == 0
- let rel = cast[ByteAddress](replacement) - cast[ByteAddress](target) - 5
+ let rel = cast[uint](replacement) - cast[uint](target) - 5
var instr = Instr(
bytes: [
0xe9.byte,
diff --git a/tests/testlib/wakucore.nim b/tests/testlib/wakucore.nim
index d18a87e7d..c5e16d03a 100644
--- a/tests/testlib/wakucore.nim
+++ b/tests/testlib/wakucore.nim
@@ -1,7 +1,7 @@
import
std/[options, times],
- stew/[results, byteutils],
- stew/shims/net,
+ results,
+ stew/byteutils,
chronos,
libp2p/switch,
libp2p/builders,
diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim
index 1c9b8ec83..87fdbcf5f 100644
--- a/tests/testlib/wakunode.nim
+++ b/tests/testlib/wakunode.nim
@@ -1,6 +1,6 @@
import
std/options,
- stew/results,
+ results,
stew/shims/net,
chronos,
libp2p/switch,
@@ -15,35 +15,41 @@ import
node/peer_manager,
waku_enr,
discovery/waku_discv5,
- factory/external_config,
factory/internal_config,
+ factory/waku_conf,
+ factory/conf_builder/conf_builder,
factory/builder,
],
./common
# Waku node
-proc defaultTestWakuNodeConf*(): WakuNodeConf =
- ## set cluster-id == 0 to not use TWN as that needs a background blockchain (e.g. anvil)
- ## running because RLN is mounted if TWN (cluster-id == 1) is configured.
- WakuNodeConf(
- cmd: noCommand,
- tcpPort: Port(60000),
- websocketPort: Port(8000),
- listenAddress: parseIpAddress("0.0.0.0"),
- restAddress: parseIpAddress("127.0.0.1"),
- metricsServerAddress: parseIpAddress("127.0.0.1"),
- dnsAddrsNameServers: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
- nat: "any",
- maxConnections: 50,
- relayServiceRatio: "60:40",
- maxMessageSize: "1024 KiB",
- clusterId: DefaultClusterId,
- shards: @[DefaultShardId],
- relay: true,
- rendezvous: true,
- storeMessageDbUrl: "sqlite://store.sqlite3",
+# TODO: migrate to usage of a test cluster conf
+proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
+ var builder = WakuConfBuilder.init()
+ builder.withP2pTcpPort(Port(60000))
+ builder.withP2pListenAddress(parseIpAddress("0.0.0.0"))
+ builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1"))
+ builder.withDnsAddrsNameServers(
+ @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
)
+ builder.withNatStrategy("any")
+ builder.withMaxConnections(50)
+ builder.withRelayServiceRatio("60:40")
+ builder.withMaxMessageSize("1024 KiB")
+ builder.withClusterId(DefaultClusterId)
+ builder.withShards(@[DefaultShardId])
+ builder.withRelay(true)
+ builder.withRendezvous(true)
+ builder.storeServiceConf.withDbMigration(false)
+ builder.storeServiceConf.withSupportV2(false)
+ builder.webSocketConf.withWebSocketPort(Port(8000))
+ builder.webSocketConf.withEnabled(true)
+ return builder
+
+proc defaultTestWakuConf*(): WakuConf =
+ var builder = defaultTestWakuConfBuilder()
+ return builder.build().value
proc newTestWakuNode*(
nodeKey: crypto.PrivateKey,
@@ -78,31 +84,31 @@ proc newTestWakuNode*(
else:
extPort
- var conf = defaultTestWakuNodeConf()
+ var conf = defaultTestWakuConf()
conf.clusterId = clusterId
conf.shards = shards
if dns4DomainName.isSome() and extIp.isNone():
# If there's an error resolving the IP, an exception is thrown and test fails
- let dns = (waitFor dnsResolve(dns4DomainName.get(), conf)).valueOr:
+ let dns = (waitFor dnsResolve(dns4DomainName.get(), conf.dnsAddrsNameServers)).valueOr:
raise newException(Defect, error)
resolvedExtIp = some(parseIpAddress(dns))
let netConf = NetConfig.init(
- bindIp = bindIp,
clusterId = conf.clusterId,
+ bindIp = bindIp,
bindPort = bindPort,
extIp = resolvedExtIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
- wsBindPort = wsBindPort,
+ wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
- wakuFlags = wakuFlags,
dns4DomainName = dns4DomainName,
discv5UdpPort = discv5UdpPort,
+ wakuFlags = wakuFlags,
).valueOr:
raise newException(Defect, "Invalid network configuration: " & error)
diff --git a/tests/waku_archive/archive_utils.nim b/tests/waku_archive/archive_utils.nim
index 48e23f41d..498855075 100644
--- a/tests/waku_archive/archive_utils.nim
+++ b/tests/waku_archive/archive_utils.nim
@@ -1,13 +1,12 @@
{.used.}
-import std/options, stew/results, chronos, libp2p/crypto/crypto
+import std/options, results, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
waku_core,
waku_archive,
- waku_archive/common,
waku_archive/driver/sqlite_driver,
waku_archive/driver/sqlite_driver/migrations,
common/databases/db_sqlite,
diff --git a/tests/waku_archive/test_driver_queue.nim b/tests/waku_archive/test_driver_queue.nim
index 16c0163c7..584ea9d7e 100644
--- a/tests/waku_archive/test_driver_queue.nim
+++ b/tests/waku_archive/test_driver_queue.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, stew/results, testutils/unittests
+import std/options, results, testutils/unittests
import
waku/[
waku_archive,
diff --git a/tests/waku_archive/test_driver_queue_index.nim b/tests/waku_archive/test_driver_queue_index.nim
index c383a676c..f34e181af 100644
--- a/tests/waku_archive/test_driver_queue_index.nim
+++ b/tests/waku_archive/test_driver_queue_index.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
+import std/random, testutils/unittests
import waku/waku_core, waku/waku_archive/driver/queue_driver/index
var rng = initRand()
diff --git a/tests/waku_archive/test_driver_queue_pagination.nim b/tests/waku_archive/test_driver_queue_pagination.nim
index dec3ccdee..45543c570 100644
--- a/tests/waku_archive/test_driver_queue_pagination.nim
+++ b/tests/waku_archive/test_driver_queue_pagination.nim
@@ -9,7 +9,6 @@ import
waku_archive/driver/queue_driver/index,
waku_core,
],
- ../testlib/common,
../testlib/wakucore
proc getTestQueueDriver(numMessages: int): QueueDriver =
diff --git a/tests/waku_archive/test_driver_sqlite.nim b/tests/waku_archive/test_driver_sqlite.nim
index 3ceae595d..5809a8492 100644
--- a/tests/waku_archive/test_driver_sqlite.nim
+++ b/tests/waku_archive/test_driver_sqlite.nim
@@ -2,12 +2,7 @@
import std/sequtils, testutils/unittests, chronos
import
- waku/[
- common/databases/db_sqlite,
- waku_archive,
- waku_archive/driver/sqlite_driver,
- waku_core,
- ],
+ waku/[waku_archive, waku_archive/driver/sqlite_driver, waku_core],
../waku_archive/archive_utils,
../testlib/wakucore
diff --git a/tests/waku_archive/test_driver_sqlite_query.nim b/tests/waku_archive/test_driver_sqlite_query.nim
index fc00a3be8..327ae17bb 100644
--- a/tests/waku_archive/test_driver_sqlite_query.nim
+++ b/tests/waku_archive/test_driver_sqlite_query.nim
@@ -4,13 +4,7 @@ import
std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles
import
- waku/[
- common/databases/db_sqlite,
- waku_archive,
- waku_archive/driver/sqlite_driver,
- waku_core,
- waku_core/message/digest,
- ],
+ waku/[waku_archive, waku_core, waku_core/message/digest],
../testlib/common,
../testlib/wakucore,
../waku_archive/archive_utils
diff --git a/tests/waku_archive/test_retention_policy.nim b/tests/waku_archive/test_retention_policy.nim
index 4686dda7e..ea86e1d69 100644
--- a/tests/waku_archive/test_retention_policy.nim
+++ b/tests/waku_archive/test_retention_policy.nim
@@ -1,13 +1,11 @@
{.used.}
-import std/[sequtils, times], stew/results, testutils/unittests, chronos
+import std/[sequtils, times], results, testutils/unittests, chronos
import
waku/[
- common/databases/db_sqlite,
waku_core,
waku_core/message/digest,
waku_archive,
- waku_archive/driver/sqlite_driver,
waku_archive/retention_policy,
waku_archive/retention_policy/retention_policy_capacity,
waku_archive/retention_policy/retention_policy_size,
diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim
index 9211b15e9..802473d64 100644
--- a/tests/waku_archive/test_waku_archive.nim
+++ b/tests/waku_archive/test_waku_archive.nim
@@ -4,12 +4,10 @@ import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/cryp
import
waku/[
- common/databases/db_sqlite,
common/databases/db_postgres/dbconn,
common/paging,
waku_core,
waku_core/message/digest,
- waku_archive/driver/sqlite_driver,
waku_archive,
],
../waku_archive/archive_utils,
diff --git a/tests/waku_archive_legacy/test_driver_queue.nim b/tests/waku_archive_legacy/test_driver_queue.nim
index c69e5aa6a..aec9ad65d 100644
--- a/tests/waku_archive_legacy/test_driver_queue.nim
+++ b/tests/waku_archive_legacy/test_driver_queue.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, stew/results, testutils/unittests
+import std/options, results, testutils/unittests
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
diff --git a/tests/waku_archive_legacy/test_driver_sqlite.nim b/tests/waku_archive_legacy/test_driver_sqlite.nim
index af043116f..9d8c4d14b 100644
--- a/tests/waku_archive_legacy/test_driver_sqlite.nim
+++ b/tests/waku_archive_legacy/test_driver_sqlite.nim
@@ -2,12 +2,10 @@
import std/sequtils, testutils/unittests, chronos
import
- waku/common/databases/db_sqlite,
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/sqlite_driver,
waku/waku_core,
../waku_archive_legacy/archive_utils,
- ../testlib/common,
../testlib/wakucore
suite "SQLite driver":
diff --git a/tests/waku_archive_legacy/test_driver_sqlite_query.nim b/tests/waku_archive_legacy/test_driver_sqlite_query.nim
index ecf88e7c0..42f394891 100644
--- a/tests/waku_archive_legacy/test_driver_sqlite_query.nim
+++ b/tests/waku_archive_legacy/test_driver_sqlite_query.nim
@@ -4,9 +4,7 @@ import
std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles
import
- waku/common/databases/db_sqlite,
waku/waku_archive_legacy,
- waku/waku_archive_legacy/driver/sqlite_driver,
waku/waku_core,
waku/waku_core/message/digest,
../testlib/common,
diff --git a/tests/waku_archive_legacy/test_waku_archive.nim b/tests/waku_archive_legacy/test_waku_archive.nim
index 181560a28..e58b2cfc9 100644
--- a/tests/waku_archive_legacy/test_waku_archive.nim
+++ b/tests/waku_archive_legacy/test_waku_archive.nim
@@ -1,21 +1,13 @@
{.used.}
-import
- std/[options, sequtils],
- testutils/unittests,
- chronicles,
- chronos,
- libp2p/crypto/crypto
+import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto
import
- waku/common/databases/db_sqlite,
waku/common/paging,
waku/waku_core,
waku/waku_core/message/digest,
- waku/waku_archive_legacy/driver/sqlite_driver,
waku/waku_archive_legacy,
../waku_archive_legacy/archive_utils,
- ../testlib/common,
../testlib/wakucore
suite "Waku Archive - message handling":
diff --git a/tests/waku_core/test_peers.nim b/tests/waku_core/test_peers.nim
index 3dc68fa1a..59ae2e2f3 100644
--- a/tests/waku_core/test_peers.nim
+++ b/tests/waku_core/test_peers.nim
@@ -1,7 +1,7 @@
{.used.}
import
- stew/results,
+ results,
testutils/unittests,
libp2p/multiaddress,
libp2p/peerid,
diff --git a/tests/waku_discv5/test_all.nim b/tests/waku_discv5/test_all.nim
deleted file mode 100644
index a6d2c22c4..000000000
--- a/tests/waku_discv5/test_all.nim
+++ /dev/null
@@ -1 +0,0 @@
-import ./test_waku_discv5
diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim
index c4696d658..c5dd1c55e 100644
--- a/tests/waku_discv5/test_waku_discv5.nim
+++ b/tests/waku_discv5/test_waku_discv5.nim
@@ -1,24 +1,32 @@
{.used.}
import
- std/[sequtils, algorithm],
- stew/results,
- stew/shims/net,
+ std/[sequtils, algorithm, options, net],
+ results,
chronos,
chronicles,
testutils/unittests,
libp2p/crypto/crypto as libp2p_keys,
- eth/keys as eth_keys
+ eth/keys as eth_keys,
+ eth/p2p/discoveryv5/enr as ethEnr,
+ libp2p/crypto/secp,
+ libp2p/protocols/rendezvous
import
- waku/[waku_core/topics, waku_enr, discovery/waku_discv5, common/enr],
- ../testlib/[wakucore, testasync, assertions, futures, wakunode],
+ waku/[
+ waku_core/topics,
+ waku_core/codecs,
+ waku_enr,
+ discovery/waku_discv5,
+ waku_enr/capabilities,
+ factory/conf_builder/conf_builder,
+ factory/waku,
+ node/waku_node,
+ node/peer_manager,
+ ],
+ ../testlib/[wakucore, testasync, assertions, futures, wakunode, testutils],
../waku_enr/utils,
- ./utils
-
-import eth/p2p/discoveryv5/enr as ethEnr
-
-include waku/factory/waku
+ ./utils as discv5_utils
suite "Waku Discovery v5":
const validEnr =
@@ -53,7 +61,7 @@ suite "Waku Discovery v5":
var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
require builder.withWakuRelaySharding(shardsTopics).isOk()
- builder.withWakuCapabilities(Relay)
+ builder.withWakuCapabilities(Capabilities.Relay)
let recordRes = builder.build()
require recordRes.isOk()
@@ -73,7 +81,7 @@ suite "Waku Discovery v5":
var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
require builder.withWakuRelaySharding(shardsTopics).isOk()
- builder.withWakuCapabilities(Relay)
+ builder.withWakuCapabilities(Capabilities.Relay)
let recordRes = builder.build()
require recordRes.isOk()
@@ -93,7 +101,7 @@ suite "Waku Discovery v5":
var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum)
require builder.withWakuRelaySharding(shardsTopics).isOk()
- builder.withWakuCapabilities(Relay)
+ builder.withWakuCapabilities(Capabilities.Relay)
let recordRes = builder.build()
require recordRes.isOk()
@@ -187,7 +195,7 @@ suite "Waku Discovery v5":
indices = indices,
flags = recordFlags,
)
- node = newTestDiscv5(
+ node = discv5_utils.newTestDiscv5(
privKey = privKey,
bindIp = bindIp,
tcpPort = tcpPort,
@@ -298,7 +306,9 @@ suite "Waku Discovery v5":
# Cleanup
await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop())
- asyncTest "find random peers with instance predicate":
+ xasyncTest "find random peers with instance predicate":
+ ## This is skipped because is flaky and made CI randomly fail but is useful to run manually
+
## Setup
# Records
let
@@ -342,7 +352,8 @@ suite "Waku Discovery v5":
let res4 = await node4.start()
assertResultOk res4
- await sleepAsync(FUTURE_TIMEOUT)
+ ## leave some time for discv5 to act
+ await sleepAsync(chronos.seconds(10))
## When
let peers = await node1.findRandomPeers()
@@ -355,7 +366,7 @@ suite "Waku Discovery v5":
# Cleanup
await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop())
- suite "addBoostrapNode":
+ suite "addBootstrapNode":
asyncTest "address is valid":
# Given an empty list of enrs
var enrs: seq[Record] = @[]
@@ -407,40 +418,127 @@ suite "Waku Discovery v5":
enrs.len == 0
suite "waku discv5 initialization":
+ asyncTest "Start waku and check discv5 discovered peers":
+ let myRng = libp2p_keys.newRng()
+ var confBuilder = defaultTestWakuConfBuilder()
+
+ confBuilder.withNodeKey(libp2p_keys.PrivateKey.random(Secp256k1, myRng[])[])
+ confBuilder.discv5Conf.withEnabled(true)
+ confBuilder.discv5Conf.withUdpPort(9000.Port)
+
+ let conf = confBuilder.build().valueOr:
+ raiseAssert error
+
+ let waku0 = Waku.new(conf).valueOr:
+ raiseAssert error
+ (waitFor startWaku(addr waku0)).isOkOr:
+ raiseAssert error
+
+ confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[])
+ confBuilder.discv5Conf.withBootstrapNodes(@[waku0.node.enr.toURI()])
+ confBuilder.discv5Conf.withEnabled(true)
+ confBuilder.discv5Conf.withUdpPort(9001.Port)
+ confBuilder.withP2pTcpPort(60001.Port)
+ confBuilder.websocketConf.withEnabled(false)
+
+ let conf1 = confBuilder.build().valueOr:
+ raiseAssert error
+
+ let waku1 = Waku.new(conf1).valueOr:
+ raiseAssert error
+ (waitFor startWaku(addr waku1)).isOkOr:
+ raiseAssert error
+
+ await waku1.node.mountPeerExchange()
+ await waku1.node.mountRendezvous()
+
+ confBuilder.discv5Conf.withBootstrapNodes(@[waku1.node.enr.toURI()])
+ confBuilder.withP2pTcpPort(60003.Port)
+ confBuilder.discv5Conf.withUdpPort(9003.Port)
+ confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[])
+ confBuilder.websocketConf.withEnabled(false)
+
+ let conf2 = confBuilder.build().valueOr:
+ raiseAssert error
+
+ let waku2 = Waku.new(conf2).valueOr:
+ raiseAssert error
+ (waitFor startWaku(addr waku2)).isOkOr:
+ raiseAssert error
+
+ # leave some time for discv5 to act
+ await sleepAsync(chronos.seconds(10))
+
+ var r = waku0.node.peerManager.selectPeer(WakuPeerExchangeCodec)
+ assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec"
+
+ r = waku1.node.peerManager.selectPeer(WakuRelayCodec)
+ assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec"
+
+ r = waku1.node.peerManager.selectPeer(WakuPeerExchangeCodec)
+ assert r.isNone(), "should not retrieve peer mounting WakuPeerExchangeCodec"
+
+ r = waku2.node.peerManager.selectPeer(WakuPeerExchangeCodec)
+ assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec"
+
+ r = waku2.node.peerManager.selectPeer(RendezVousCodec)
+ assert r.isSome(), "could not retrieve peer mounting RendezVousCodec"
+
asyncTest "Discv5 bootstrap nodes should be added to the peer store":
- var conf = defaultTestWakuNodeConf()
+ var confBuilder = defaultTestWakuConfBuilder()
+ confBuilder.discv5Conf.withEnabled(true)
+ confBuilder.discv5Conf.withUdpPort(9003.Port)
+ confBuilder.discv5Conf.withBootstrapNodes(@[validEnr])
+ let conf = confBuilder.build().valueOr:
+ raiseAssert error
- conf.discv5BootstrapNodes = @[validEnr]
-
- let waku = Waku.init(conf).valueOr:
+ let waku = Waku.new(conf).valueOr:
raiseAssert error
discard setupDiscoveryV5(
- waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue,
- waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key,
+ waku.node.enr,
+ waku.node.peerManager,
+ waku.node.topicSubscriptionQueue,
+ waku.conf.discv5Conf.get(),
+ waku.dynamicBootstrapNodes,
+ waku.rng,
+ waku.conf.nodeKey,
+ waku.conf.networkConf.p2pListenAddress,
+ waku.conf.portsShift,
)
check:
- waku.node.peerManager.wakuPeerStore.peers().anyIt(
+ waku.node.peerManager.switch.peerStore.peers().anyIt(
it.enr.isSome() and it.enr.get().toUri() == validEnr
)
asyncTest "Invalid discv5 bootstrap node ENRs are ignored":
- var conf = defaultTestWakuNodeConf()
+ var confBuilder = defaultTestWakuConfBuilder()
+ confBuilder.discv5Conf.withEnabled(true)
+ confBuilder.discv5Conf.withUdpPort(9004.Port)
let invalidEnr = "invalid-enr"
- conf.discv5BootstrapNodes = @[invalidEnr]
+ confBuilder.discv5Conf.withBootstrapNodes(@[invalidEnr])
+ let conf = confBuilder.build().valueOr:
+ raiseAssert error
- let waku = Waku.init(conf).valueOr:
+ let waku = Waku.new(conf).valueOr:
raiseAssert error
discard setupDiscoveryV5(
- waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue,
- waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key,
+ waku.node.enr,
+ waku.node.peerManager,
+ waku.node.topicSubscriptionQueue,
+ conf.discv5Conf.get(),
+ waku.dynamicBootstrapNodes,
+ waku.rng,
+ waku.conf.nodeKey,
+ waku.conf.networkConf.p2pListenAddress,
+ waku.conf.portsShift,
)
check:
- not waku.node.peerManager.wakuPeerStore.peers().anyIt(
+ not waku.node.peerManager.switch.peerStore.peers().anyIt(
it.enr.isSome() and it.enr.get().toUri() == invalidEnr
)
diff --git a/tests/waku_discv5/utils.nim b/tests/waku_discv5/utils.nim
index 679d206ea..422e13fd9 100644
--- a/tests/waku_discv5/utils.nim
+++ b/tests/waku_discv5/utils.nim
@@ -1,6 +1,5 @@
import
std/options,
- stew/results,
stew/shims/net,
chronos,
libp2p/crypto/crypto as libp2p_keys,
diff --git a/tests/waku_enr/utils.nim b/tests/waku_enr/utils.nim
index 6dd017add..8f79b1d8f 100644
--- a/tests/waku_enr/utils.nim
+++ b/tests/waku_enr/utils.nim
@@ -1,15 +1,13 @@
import
std/options,
sequtils,
- stew/results,
+ results,
stew/shims/net,
chronos,
libp2p/crypto/crypto as libp2p_keys,
eth/keys as eth_keys
-import
- waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/sharding],
- ../testlib/[common, wakucore]
+import waku/[waku_enr, discovery/waku_discv5, waku_enr/sharding], ../testlib/wakucore
proc newTestEnrRecord*(
privKey: libp2p_keys.PrivateKey,
diff --git a/tests/waku_filter_v2/test_waku_client.nim b/tests/waku_filter_v2/test_waku_client.nim
index dbfcd1c51..2c3e2f4ec 100644
--- a/tests/waku_filter_v2/test_waku_client.nim
+++ b/tests/waku_filter_v2/test_waku_client.nim
@@ -1,13 +1,6 @@
{.used.}
-import
- std/[options, tables, sequtils, strutils, json],
- testutils/unittests,
- stew/[results, byteutils],
- chronos,
- chronicles,
- os,
- libp2p/peerstore
+import std/[options, sequtils, json], testutils/unittests, results, chronos
import
waku/node/[peer_manager, waku_node],
diff --git a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim
index c751114c1..7c8c640ba 100644
--- a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim
+++ b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim
@@ -1,25 +1,18 @@
{.used.}
import
- std/[options, tables, sequtils, strutils, json],
+ std/[options, tables, json],
testutils/unittests,
- stew/[results, byteutils],
+ results,
chronos,
chronicles,
- os,
libp2p/peerstore
import
- waku/[
- node/peer_manager,
- waku_core,
- common/rate_limit/setting,
- common/rate_limit/token_bucket,
- ],
- waku/waku_filter_v2/[common, client, subscriptions, protocol, rpc_codec],
- ../testlib/[wakucore, testasync, testutils, futures, sequtils],
- ./waku_filter_utils,
- ../resources/payloads
+ waku/[node/peer_manager, waku_core],
+ waku/waku_filter_v2/[common, client, subscriptions, protocol],
+ ../testlib/[wakucore, testasync, futures],
+ ./waku_filter_utils
type AFilterClient = ref object of RootObj
clientSwitch*: Switch
diff --git a/tests/waku_filter_v2/waku_filter_utils.nim b/tests/waku_filter_v2/waku_filter_utils.nim
index 5698949c5..2f04ceb36 100644
--- a/tests/waku_filter_v2/waku_filter_utils.nim
+++ b/tests/waku_filter_v2/waku_filter_utils.nim
@@ -1,4 +1,4 @@
-import std/[options, tables, sets, sequtils, algorithm], chronos, chronicles, os
+import std/[options, tables, sets, algorithm], chronos, chronicles, os
import
waku/[
diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim
index ec4b8e254..0dde4140d 100644
--- a/tests/waku_lightpush/lightpush_utils.nim
+++ b/tests/waku_lightpush/lightpush_utils.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, chronicles, chronos, libp2p/crypto/crypto
+import std/options, chronos, chronicles, libp2p/crypto/crypto
import
waku/node/peer_manager,
diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim
index 3499ae9e4..500d228ce 100644
--- a/tests/waku_lightpush/test_client.nim
+++ b/tests/waku_lightpush/test_client.nim
@@ -3,8 +3,8 @@
import
std/[options, strscans],
testutils/unittests,
- chronicles,
chronos,
+ chronicles,
libp2p/crypto/crypto
import
@@ -15,7 +15,7 @@ import
waku_lightpush/client,
waku_lightpush/protocol_metrics,
],
- ../testlib/[assertions, wakucore, testasync, futures, testutils],
+ ../testlib/[assertions, wakucore, testasync, futures],
./lightpush_utils,
../resources/[pubsub_topics, content_topics, payloads]
@@ -343,6 +343,39 @@ suite "Waku Lightpush Client":
# Cleanup
await serverSwitch2.stop()
+ asyncTest "Check timestamp is not zero":
+    ## This test validates that, even if the generated message has a timestamp of 0,
+ ## the node will eventually set a timestamp when publishing the message.
+ let
+ zeroTimestamp = 0
+ meta = "TEST-META"
+ message = fakeWakuMessage(
+ payloads.ALPHABETIC, content_topics.CURRENT, meta, zeroTimestamp
+ )
+
+ # When publishing a valid payload
+ let publishResponse =
+ await client.publish(some(pubsubTopic), message, serverRemotePeerInfo)
+
+ # Then the message is received by the server
+ discard await handlerFuture.withTimeout(FUTURE_TIMEOUT)
+ assertResultOk publishResponse
+ check handlerFuture.finished()
+
+ # And the message is received with the correct topic and payload
+ let (readPubsubTopic, readMessage) = handlerFuture.read()
+
+ check:
+ pubsubTopic == readPubsubTopic
+ message.payload == readMessage.payload
+ message.contentTopic == readMessage.contentTopic
+ message.meta == readMessage.meta
+ message.timestamp != readMessage.timestamp
+ message.ephemeral == readMessage.ephemeral
+ message.proof == readMessage.proof
+ message.version == readMessage.version
+ readMessage.timestamp > 0
+
suite "Verification of PushResponse Payload":
asyncTest "Positive Responses":
# When sending a valid PushRequest
diff --git a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim
index 7148be37a..0dd7913d1 100644
--- a/tests/waku_lightpush/test_ratelimit.nim
+++ b/tests/waku_lightpush/test_ratelimit.nim
@@ -1,24 +1,11 @@
{.used.}
-import
- std/[options, strscans],
- testutils/unittests,
- chronicles,
- chronos,
- libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
- waku/[
- node/peer_manager,
- common/rate_limit/setting,
- waku_core,
- waku_lightpush,
- waku_lightpush/client,
- waku_lightpush/protocol_metrics,
- ],
- ../testlib/[assertions, wakucore, testasync, futures, testutils],
- ./lightpush_utils,
- ../resources/[pubsub_topics, content_topics, payloads]
+ waku/[node/peer_manager, waku_core, waku_lightpush, waku_lightpush/client],
+ ../testlib/wakucore,
+ ./lightpush_utils
suite "Rate limited push service":
asyncTest "push message with rate limit not violated":
diff --git a/tests/waku_lightpush/test_server_i13n.nim b/tests/waku_lightpush/test_server_i13n.nim
new file mode 100644
index 000000000..7bd40894e
--- /dev/null
+++ b/tests/waku_lightpush/test_server_i13n.nim
@@ -0,0 +1,89 @@
+{.used.}
+
+import
+ std/[options, strscans],
+ testutils/unittests,
+ chronicles,
+ chronos,
+ libp2p/crypto/crypto
+
+import
+ waku/[
+ node/peer_manager,
+ waku_core,
+ waku_lightpush,
+ waku_lightpush/client,
+ waku_lightpush/protocol_metrics,
+ ],
+ ../testlib/[assertions, wakucore, testasync, futures, testutils],
+ ./lightpush_utils,
+ ../resources/[pubsub_topics, content_topics, payloads]
+
+suite "Lightpush Server Incentivization Test":
+ var
+ serverSwitch {.threadvar.}: Switch
+ clientSwitch {.threadvar.}: Switch
+ server {.threadvar.}: WakuLightPush
+ client {.threadvar.}: WakuLightPushClient
+ serverPeerId {.threadvar.}: RemotePeerInfo
+ handlerFuture {.threadvar.}: Future[(string, WakuMessage)]
+ tokenPeriod {.threadvar.}: Duration
+ waitInBetweenFor {.threadvar.}: Duration
+ firstWaitExtend {.threadvar.}: Duration
+
+ asyncSetup:
+ serverSwitch = newTestSwitch()
+ clientSwitch = newTestSwitch()
+
+ await allFutures(serverSwitch.start(), clientSwitch.start())
+
+ handlerFuture = newFuture[(string, WakuMessage)]()
+ let handler: PushMessageHandler = proc(
+ peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
+ ): Future[WakuLightPushResult] {.async.} =
+ handlerFuture.complete((pubsubTopic, message))
+ return lightpushSuccessResult(1)
+
+ tokenPeriod = 500.millis
+ server = await newTestWakuLightpushNode(
+ serverSwitch, handler, some((3, tokenPeriod)), eligibilityEnabled = true
+ )
+ client = newTestWakuLightpushClient(clientSwitch)
+ serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo()
+
+ waitInBetweenFor = 20.millis
+ firstWaitExtend = 300.millis
+
+ asyncTeardown:
+ await allFutures(clientSwitch.stop(), serverSwitch.stop())
+
+ asyncTest "message with valid eligibility proof is published":
+ let sendMsgProc = proc(): Future[void] {.async.} =
+ let message = fakeWakuMessage()
+
+ handlerFuture = newFuture[(string, WakuMessage)]()
+ let requestRes =
+ await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId)
+
+ check await handlerFuture.withTimeout(50.millis)
+
+ check:
+ requestRes.isOk()
+ handlerFuture.finished()
+
+ let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read()
+
+ check:
+ handledMessagePubsubTopic == DefaultPubsubTopic
+ handledMessage == message
+
+ for runCnt in 0 ..< 3:
+ let startTime = Moment.now()
+ for testCnt in 0 ..< 3:
+ await sendMsgProc()
+ await sleepAsync(waitInBetweenFor)
+
+ let endTime = Moment.now()
+ let elapsed = (endTime - startTime)
+ await sleepAsync(tokenPeriod - elapsed + firstWaitExtend)
+ firstWaitExtend = 100.millis
diff --git a/tests/waku_lightpush_legacy/lightpush_utils.nim b/tests/waku_lightpush_legacy/lightpush_utils.nim
index 733fbc8b1..11c4bf929 100644
--- a/tests/waku_lightpush_legacy/lightpush_utils.nim
+++ b/tests/waku_lightpush_legacy/lightpush_utils.nim
@@ -1,10 +1,9 @@
{.used.}
-import std/options, chronicles, chronos, libp2p/crypto/crypto
+import std/options, chronos, libp2p/crypto/crypto
import
waku/node/peer_manager,
- waku/waku_core,
waku/waku_lightpush_legacy,
waku/waku_lightpush_legacy/[client, common],
waku/common/rate_limit/setting,
diff --git a/tests/waku_lightpush_legacy/test_client.nim b/tests/waku_lightpush_legacy/test_client.nim
index b71b7d5c3..1dcb466c9 100644
--- a/tests/waku_lightpush_legacy/test_client.nim
+++ b/tests/waku_lightpush_legacy/test_client.nim
@@ -1,11 +1,6 @@
{.used.}
-import
- std/[options, strscans],
- testutils/unittests,
- chronicles,
- chronos,
- libp2p/crypto/crypto
+import std/[options, strscans], testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -16,9 +11,8 @@ import
waku_lightpush_legacy/common,
waku_lightpush_legacy/protocol_metrics,
waku_lightpush_legacy/rpc,
- waku_lightpush_legacy/rpc_codec,
],
- ../testlib/[assertions, wakucore, testasync, futures, testutils],
+ ../testlib/[assertions, wakucore, testasync, futures],
./lightpush_utils,
../resources/[pubsub_topics, content_topics, payloads]
diff --git a/tests/waku_lightpush_legacy/test_ratelimit.nim b/tests/waku_lightpush_legacy/test_ratelimit.nim
index 1d033302f..3df8d369d 100644
--- a/tests/waku_lightpush_legacy/test_ratelimit.nim
+++ b/tests/waku_lightpush_legacy/test_ratelimit.nim
@@ -1,27 +1,17 @@
{.used.}
-import
- std/[options, strscans],
- testutils/unittests,
- chronicles,
- chronos,
- libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
- common/rate_limit/setting,
waku_core,
waku_lightpush_legacy,
waku_lightpush_legacy/client,
waku_lightpush_legacy/common,
- waku_lightpush_legacy/protocol_metrics,
- waku_lightpush_legacy/rpc,
- waku_lightpush_legacy/rpc_codec,
],
- ../testlib/[assertions, wakucore, testasync, futures, testutils],
- ./lightpush_utils,
- ../resources/[pubsub_topics, content_topics, payloads]
+ ../testlib/wakucore,
+ ./lightpush_utils
suite "Rate limited push service":
asyncTest "push message with rate limit not violated":
diff --git a/tests/waku_peer_exchange/test_protocol.nim b/tests/waku_peer_exchange/test_protocol.nim
index 11a61c4dc..8f7f20574 100644
--- a/tests/waku_peer_exchange/test_protocol.nim
+++ b/tests/waku_peer_exchange/test_protocol.nim
@@ -1,11 +1,10 @@
{.used.}
import
- std/[options, sequtils, tables, net],
+ std/[options, sequtils, net],
testutils/unittests,
chronos,
- chronicles,
- libp2p/[switch, peerId, crypto/crypto, multistream, muxers/muxer],
+ libp2p/[switch, peerId, crypto/crypto],
eth/[keys, p2p/discoveryv5/enr]
import
@@ -18,14 +17,11 @@ import
waku_peer_exchange/rpc_codec,
waku_peer_exchange/protocol,
node/peer_manager,
- waku_relay/protocol,
- waku_relay,
waku_core,
- waku_core/message/codec,
common/enr/builder,
waku_enr/sharding,
],
- ../testlib/[wakucore, wakunode, simple_mock, assertions],
+ ../testlib/[wakucore, wakunode, assertions],
./utils.nim
suite "Waku Peer Exchange":
diff --git a/tests/waku_relay/test_message_id.nim b/tests/waku_relay/test_message_id.nim
index b46554d17..633303120 100644
--- a/tests/waku_relay/test_message_id.nim
+++ b/tests/waku_relay/test_message_id.nim
@@ -1,10 +1,11 @@
import
unittest,
- stew/[shims/net, results, byteutils],
+ results,
+ stew/[shims/net, byteutils],
nimcrypto/sha2,
libp2p/protocols/pubsub/rpc/messages
-import waku/waku_relay/message_id, ../testlib/sequtils
+import waku/waku_relay/message_id
suite "Message ID Provider":
test "Non-empty string":
diff --git a/tests/waku_relay/test_protocol.nim b/tests/waku_relay/test_protocol.nim
index 399b55ea8..d0e8a7ed6 100644
--- a/tests/waku_relay/test_protocol.nim
+++ b/tests/waku_relay/test_protocol.nim
@@ -1,13 +1,12 @@
{.used.}
import
- std/[options, sequtils, strutils, strformat],
+ std/[options, strformat],
stew/shims/net as stewNet,
testutils/unittests,
- chronicles,
chronos,
libp2p/protocols/pubsub/[pubsub, gossipsub],
- libp2p/[multihash, stream/connection, switch],
+ libp2p/[stream/connection, switch],
./crypto_utils,
std/json
@@ -19,7 +18,7 @@ import
waku_core,
waku_core/message/codec,
],
- ../testlib/[wakucore, testasync, testutils, futures, sequtils],
+ ../testlib/[wakucore, testasync, futures, sequtils],
./utils,
../resources/payloads
diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim
index 398470163..5d5ce8458 100644
--- a/tests/waku_relay/test_wakunode_relay.nim
+++ b/tests/waku_relay/test_wakunode_relay.nim
@@ -30,7 +30,8 @@ suite "WakuNode - Relay":
# Relay protocol starts if mounted after node start
await node1.start()
- await node1.mountRelay()
+ (await node1.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
check:
GossipSub(node1.wakuRelay).heartbeatFut.isNil() == false
@@ -41,7 +42,8 @@ suite "WakuNode - Relay":
nodeKey2 = generateSecp256k1Key()
node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0))
- await node2.mountRelay()
+ (await node2.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
check:
# Relay has not yet started as node has not yet started
@@ -69,13 +71,16 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node3.start()
- await node3.mountRelay(@[shard])
+ (await node3.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await allFutures(
node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]),
@@ -90,9 +95,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
var res = await node1.publish(some($shard), message)
@@ -135,13 +148,16 @@ suite "WakuNode - Relay":
# start all the nodes
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node3.start()
- await node3.mountRelay(@[shard])
+ (await node3.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -178,7 +194,14 @@ suite "WakuNode - Relay":
# relay handler is called
completionFut.complete(true)
- node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
var res = await node1.publish(some($shard), message1)
@@ -220,7 +243,8 @@ suite "WakuNode - Relay":
connOk == true
# Node 1 subscribes to topic
- nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
# Node 0 publishes 5 messages not compliant with WakuMessage (aka random bytes)
@@ -264,10 +288,12 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -279,9 +305,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
@@ -312,10 +346,12 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -327,9 +363,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
@@ -360,10 +404,12 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
#delete websocket peer address
# TODO: a better way to find the index - this is too brittle
@@ -379,9 +425,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
@@ -414,10 +468,12 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -429,9 +485,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
@@ -472,10 +536,12 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- await node1.mountRelay(@[shard])
+ (await node1.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node2.start()
- await node2.mountRelay(@[shard])
+ (await node2.mountRelay(@[shard])).isOkOr:
+ assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -487,9 +553,17 @@ suite "WakuNode - Relay":
topic == $shard
msg.contentTopic == contentTopic
msg.payload == payload
+ msg.timestamp > 0
completionFut.complete(true)
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
let res = await node2.publish(some($shard), message)
@@ -558,14 +632,15 @@ suite "WakuNode - Relay":
# Stop all nodes
await allFutures(nodes.mapIt(it.stop()))
- asyncTest "Unsubscribe keep the subscription if other content topics also use the shard":
+ asyncTest "Only one subscription is allowed for contenttopics that generate the same shard":
## Setup
let
nodeKey = generateSecp256k1Key()
node = newTestWakuNode(nodeKey, parseIpAddress("0.0.0.0"), Port(0))
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
require node.mountSharding(1, 1).isOk
## Given
@@ -587,19 +662,19 @@ suite "WakuNode - Relay":
"topic must use the same shard"
## When
- node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler))
- node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler))
- node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler))
+ node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isErrOr:
+ assert false,
+ "The subscription should fail because is already subscribe to that shard"
+ node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isErrOr:
+ assert false,
+ "The subscription should fail because is already subscribe to that shard"
## Then
- node.unsubscribe((kind: ContentUnsub, topic: contentTopicB))
+ node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr:
+ assert false, "Failed to unsubscribe to topic: " & $error
check node.wakuRelay.isSubscribed(shard)
- node.unsubscribe((kind: ContentUnsub, topic: contentTopicA))
- check node.wakuRelay.isSubscribed(shard)
-
- node.unsubscribe((kind: ContentUnsub, topic: contentTopicC))
- check not node.wakuRelay.isSubscribed(shard)
-
## Cleanup
await node.stop()
diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim
index c1a085b10..6de28583e 100644
--- a/tests/waku_relay/utils.nim
+++ b/tests/waku_relay/utils.nim
@@ -4,8 +4,8 @@ import
std/[strutils, sequtils, tempfiles],
stew/byteutils,
stew/shims/net as stewNet,
- testutils/unittests,
chronos,
+ chronicles,
libp2p/switch,
libp2p/protocols/pubsub/pubsub
@@ -23,8 +23,7 @@ import
],
../waku_store/store_utils,
../waku_archive/archive_utils,
- ../testlib/[wakucore, wakunode, testasync, futures],
- ../resources/payloads
+ ../testlib/[wakucore, futures]
proc noopRawHandler*(): WakuRelayHandler =
var handler: WakuRelayHandler
@@ -45,19 +44,13 @@ proc newTestWakuRelay*(switch = newTestSwitch()): Future[WakuRelay] {.async.} =
proc setupRln*(node: WakuNode, identifier: uint) {.async.} =
await node.mountRlnRelay(
WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(identifier),
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
- rlnEpochSizeSec: 1,
+ dynamic: false,
+ credIndex: some(identifier),
+ treePath: genTempPath("rln_tree", "wakunode_" & $identifier),
+ epochSizeSec: 1,
)
)
-proc setupRelayWithRln*(
- node: WakuNode, identifier: uint, shards: seq[RelayShard]
-) {.async.} =
- await node.mountRelay(shards)
- await setupRln(node, identifier)
-
proc subscribeToContentTopicWithHandler*(
node: WakuNode, contentTopic: string
): Future[bool] =
@@ -68,7 +61,9 @@ proc subscribeToContentTopicWithHandler*(
if topic == topic:
completionFut.complete(true)
- node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler))
+ (node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler))).isOkOr:
+ error "Failed to subscribe to content topic", error
+ completionFut.complete(true)
return completionFut
proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bool] =
@@ -79,7 +74,9 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo
if topic == pubsubTopic:
completionFut.complete(true)
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))
+ (node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))).isOkOr:
+ error "Failed to subscribe to pubsub topic", error
+ completionFut.complete(false)
return completionFut
proc sendRlnMessage*(
diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
index 3d7be7220..7ba64e39b 100644
--- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
+++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
@@ -3,8 +3,9 @@
{.push raises: [].}
import
- std/[options, os, osproc, sequtils, deques, streams, strutils, tempfiles, strformat],
- stew/[results, byteutils],
+ std/[options, sequtils, deques, random],
+ results,
+ stew/byteutils,
testutils/unittests,
chronos,
chronicles,
@@ -12,23 +13,20 @@ import
web3,
libp2p/crypto/crypto,
eth/keys,
- tests/testlib/testasync
+ tests/testlib/testasync,
+ tests/testlib/testutils
import
waku/[
- waku_node,
- node/waku_node,
waku_rln_relay,
waku_rln_relay/protocol_types,
waku_rln_relay/constants,
- waku_rln_relay/contract,
waku_rln_relay/rln,
waku_rln_relay/conversion_utils,
waku_rln_relay/group_manager/on_chain/group_manager,
],
- ../testlib/[wakucore, wakunode, common],
- ./utils_onchain,
- ./utils
+ ../testlib/wakucore,
+ ./utils_onchain
suite "Onchain group manager":
# We run Anvil
@@ -50,7 +48,6 @@ suite "Onchain group manager":
manager.ethRpc.isSome()
manager.wakuRlnContract.isSome()
manager.initialized
- manager.rlnContractDeployedBlockNumber > 0.Quantity
manager.rlnRelayMaxMessageLimit == 100
asyncTest "should error on initialization when chainId does not match":
@@ -80,10 +77,10 @@ suite "Onchain group manager":
assert metadata.contractAddress == manager.ethContractAddress,
"contractAddress is not equal to " & manager.ethContractAddress
- let differentContractAddress = await uploadRLNContract(manager.ethClientUrl)
+ let differentContractAddress = await uploadRLNContract(manager.ethClientUrls[0])
# simulating a change in the contractAddress
let manager2 = OnchainGroupManager(
- ethClientUrl: EthClient,
+ ethClientUrls: @[EthClient],
ethContractAddress: $differentContractAddress,
rlnInstance: manager.rlnInstance,
onFatalErrorAction: proc(errStr: string) =
@@ -100,18 +97,13 @@ suite "Onchain group manager":
echo "---"
asyncTest "should error if contract does not exist":
- var triggeredError = false
-
manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
- manager.onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
- echo "---"
- discard
- "Failed to get the deployed block number. Have you set the correct contract address?: No response from the Web3 provider"
- echo msg
- echo "---"
- triggeredError = true
- discard await manager.init()
+ var triggeredError = false
+ try:
+ discard await manager.init()
+ except CatchableError:
+ triggeredError = true
check triggeredError
@@ -122,103 +114,71 @@ suite "Onchain group manager":
(await manager.init()).isErrOr:
raiseAssert "Expected error when keystore file doesn't exist"
- asyncTest "startGroupSync: should start group sync":
+ asyncTest "trackRootChanges: start tracking roots":
(await manager.init()).isOkOr:
raiseAssert $error
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
+ discard manager.trackRootChanges()
- asyncTest "startGroupSync: should guard against uninitialized state":
- (await manager.startGroupSync()).isErrOr:
- raiseAssert "Expected error when not initialized"
+ asyncTest "trackRootChanges: should guard against uninitialized state":
+ try:
+ discard manager.trackRootChanges()
+ except CatchableError:
+ check getCurrentExceptionMsg().len == 38
- asyncTest "startGroupSync: should sync to the state of the group":
+ asyncTest "trackRootChanges: should sync to the state of the group":
let credentials = generateCredentials(manager.rlnInstance)
- let rateCommitment = getRateCommitment(credentials, UserMessageLimit(1)).valueOr:
- raiseAssert $error
(await manager.init()).isOkOr:
raiseAssert $error
- let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
- raiseAssert $error
-
- let fut = newFuture[void]("startGroupSync")
-
- proc generateCallback(fut: Future[void]): OnRegisterCallback =
- proc callback(registrations: seq[Membership]): Future[void] {.async.} =
- check:
- registrations.len == 1
- registrations[0].index == 0
- registrations[0].rateCommitment == rateCommitment
- fut.complete()
-
- return callback
+ let merkleRootBefore = manager.fetchMerkleRoot()
try:
- manager.onRegister(generateCallback(fut))
await manager.register(credentials, UserMessageLimit(1))
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await fut
+ discard await withTimeout(trackRootChanges(manager), 15.seconds)
- let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
+ let merkleRootAfter = manager.fetchMerkleRoot()
+
+ let metadataSetRes = manager.setMetadata()
+ assert metadataSetRes.isOk(), metadataSetRes.error
+
+ let metadataOpt = getMetadata(manager.rlnInstance).valueOr:
raiseAssert $error
- let metadataOpt = manager.rlnInstance.getMetadata().valueOr:
- raiseAssert $error
+ assert metadataOpt.isSome(), "metadata is not set"
+ let metadata = metadataOpt.get()
+
check:
- metadataOpt.get().validRoots == manager.validRoots.toSeq()
+ metadata.validRoots == manager.validRoots.toSeq()
merkleRootBefore != merkleRootAfter
- asyncTest "startGroupSync: should fetch history correctly":
+ asyncTest "trackRootChanges: should fetch history correctly":
+ # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation
+ # relies on a busy loop rather than event-based monitoring. As a result, some root changes
+ # may be missed, leading to inconsistent test results (i.e., it may randomly return true or false).
+ # To ensure reliability, we use the `updateRoots()` function to validate the `validRoots` window
+ # after each registration.
const credentialCount = 6
let credentials = generateCredentials(manager.rlnInstance, credentialCount)
(await manager.init()).isOkOr:
raiseAssert $error
- let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
- raiseAssert $error
-
- type TestGroupSyncFuts = array[0 .. credentialCount - 1, Future[void]]
- var futures: TestGroupSyncFuts
- for i in 0 ..< futures.len():
- futures[i] = newFuture[void]()
- proc generateCallback(
- futs: TestGroupSyncFuts, credentials: seq[IdentityCredential]
- ): OnRegisterCallback =
- var futureIndex = 0
- proc callback(registrations: seq[Membership]): Future[void] {.async.} =
- let rateCommitment =
- getRateCommitment(credentials[futureIndex], UserMessageLimit(1))
- if registrations.len == 1 and
- registrations[0].rateCommitment == rateCommitment.get() and
- registrations[0].index == MembershipIndex(futureIndex):
- futs[futureIndex].complete()
- futureIndex += 1
-
- return callback
+ let merkleRootBefore = manager.fetchMerkleRoot()
try:
- manager.onRegister(generateCallback(futures, credentials))
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
-
for i in 0 ..< credentials.len():
await manager.register(credentials[i], UserMessageLimit(1))
+ discard await manager.updateRoots()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await allFutures(futures)
-
- let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
- raiseAssert $error
+ let merkleRootAfter = manager.fetchMerkleRoot()
check:
merkleRootBefore != merkleRootAfter
- manager.validRootBuffer.len() == credentialCount - AcceptableRootWindowSize
+ manager.validRoots.len() == credentialCount
asyncTest "register: should guard against uninitialized state":
let dummyCommitment = default(IDCommitment)
@@ -235,14 +195,12 @@ suite "Onchain group manager":
assert false, "exception raised: " & getCurrentExceptionMsg()
asyncTest "register: should register successfully":
+ # TODO :- similar to ```trackRootChanges: should fetch history correctly```
(await manager.init()).isOkOr:
raiseAssert $error
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
- let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr:
- raiseAssert $error
+ let merkleRootBefore = manager.fetchMerkleRoot()
try:
await manager.register(
@@ -254,10 +212,10 @@ suite "Onchain group manager":
assert false,
"exception raised when calling register: " & getCurrentExceptionMsg()
- let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr:
- raiseAssert $error
+ let merkleRootAfter = manager.fetchMerkleRoot()
+
check:
- merkleRootAfter.inHex() != merkleRootBefore.inHex()
+ merkleRootAfter != merkleRootBefore
manager.latestIndex == 1
asyncTest "register: callback is called":
@@ -267,19 +225,19 @@ suite "Onchain group manager":
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
- let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1))
+ let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)).get()
check:
registrations.len == 1
- registrations[0].rateCommitment == rateCommitment.get()
+ registrations[0].rateCommitment == rateCommitment
registrations[0].index == 0
fut.complete()
- manager.onRegister(callback)
(await manager.init()).isOkOr:
raiseAssert $error
+
+ manager.onRegister(callback)
+
try:
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
await manager.register(
RateCommitment(
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
@@ -301,38 +259,43 @@ suite "Onchain group manager":
assert false, "exception raised: " & getCurrentExceptionMsg()
asyncTest "validateRoot: should validate good root":
- let credentials = generateCredentials(manager.rlnInstance)
- (await manager.init()).isOkOr:
- raiseAssert $error
+ let idCredentials = generateCredentials(manager.rlnInstance)
+ let idCommitment = idCredentials.idCommitment
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
if registrations.len == 1 and
registrations[0].rateCommitment ==
- getRateCommitment(credentials, UserMessageLimit(1)).get() and
+ getRateCommitment(idCredentials, UserMessageLimit(1)).get() and
registrations[0].index == 0:
- manager.idCredentials = some(credentials)
+ manager.idCredentials = some(idCredentials)
fut.complete()
manager.onRegister(callback)
+ (await manager.init()).isOkOr:
+ raiseAssert $error
+
try:
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
- await manager.register(credentials, UserMessageLimit(1))
+ await manager.register(idCredentials, UserMessageLimit(1))
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
await fut
+ let rootUpdated = await manager.updateRoots()
+
+ if rootUpdated:
+ let proofResult = await manager.fetchMerkleProofElements()
+ if proofResult.isErr():
+ error "Failed to fetch Merkle proof", error = proofResult.error
+ manager.merkleProofCache = proofResult.get()
let messageBytes = "Hello".toBytes()
- # prepare the epoch
let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
- # generate proof
let validProofRes = manager.generateProof(
data = messageBytes, epoch = epoch, messageId = MessageId(1)
)
@@ -341,38 +304,39 @@ suite "Onchain group manager":
validProofRes.isOk()
let validProof = validProofRes.get()
- # validate the root (should be true)
let validated = manager.validateRoot(validProof.merkleRoot)
check:
validated
asyncTest "validateRoot: should reject bad root":
+ let idCredentials = generateCredentials(manager.rlnInstance)
+ let idCommitment = idCredentials.idCommitment
+
(await manager.init()).isOkOr:
raiseAssert $error
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
- let credentials = generateCredentials(manager.rlnInstance)
-
- ## Assume the registration occured out of band
- manager.idCredentials = some(credentials)
- manager.membershipIndex = some(MembershipIndex(0))
manager.userMessageLimit = some(UserMessageLimit(1))
+ manager.membershipIndex = some(MembershipIndex(0))
+ manager.idCredentials = some(idCredentials)
+
+ manager.merkleProofCache = newSeq[byte](640)
+ for i in 0 ..< 640:
+ manager.merkleProofCache[i] = byte(rand(255))
let messageBytes = "Hello".toBytes()
- # prepare the epoch
let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
- # generate proof
- let validProof = manager.generateProof(
- data = messageBytes, epoch = epoch, messageId = MessageId(0)
- ).valueOr:
- raiseAssert $error
+ let validProofRes = manager.generateProof(
+ data = messageBytes, epoch = epoch, messageId = MessageId(1)
+ )
+
+ check:
+ validProofRes.isOk()
+ let validProof = validProofRes.get()
- # validate the root (should be false)
let validated = manager.validateRoot(validProof.merkleRoot)
check:
@@ -396,13 +360,19 @@ suite "Onchain group manager":
manager.onRegister(callback)
try:
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
await manager.register(credentials, UserMessageLimit(1))
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
await fut
+ let rootUpdated = await manager.updateRoots()
+
+ if rootUpdated:
+ let proofResult = await manager.fetchMerkleProofElements()
+ if proofResult.isErr():
+ error "Failed to fetch Merkle proof", error = proofResult.error
+ manager.merkleProofCache = proofResult.get()
+
let messageBytes = "Hello".toBytes()
# prepare the epoch
@@ -415,7 +385,6 @@ suite "Onchain group manager":
).valueOr:
raiseAssert $error
- # verify the proof (should be true)
let verified = manager.verifyProof(messageBytes, validProof).valueOr:
raiseAssert $error
@@ -425,31 +394,23 @@ suite "Onchain group manager":
asyncTest "verifyProof: should reject invalid proof":
(await manager.init()).isOkOr:
raiseAssert $error
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
let idCredential = generateCredentials(manager.rlnInstance)
try:
- await manager.register(
- RateCommitment(
- idCommitment: idCredential.idCommitment, userMessageLimit: UserMessageLimit(1)
- )
- )
+ await manager.register(idCredential, UserMessageLimit(1))
except Exception, CatchableError:
assert false,
"exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
- let idCredential2 = generateCredentials(manager.rlnInstance)
-
- ## Assume the registration occured out of band
- manager.idCredentials = some(idCredential2)
- manager.membershipIndex = some(MembershipIndex(0))
- manager.userMessageLimit = some(UserMessageLimit(1))
-
let messageBytes = "Hello".toBytes()
- # prepare the epoch
+ let rootUpdated = await manager.updateRoots()
+
+ manager.merkleProofCache = newSeq[byte](640)
+ for i in 0 ..< 640:
+ manager.merkleProofCache[i] = byte(rand(255))
+
let epoch = default(Epoch)
debug "epoch in bytes", epochHex = epoch.inHex()
@@ -469,8 +430,8 @@ suite "Onchain group manager":
check:
verified == false
- asyncTest "backfillRootQueue: should backfill roots in event of chain reorg":
- const credentialCount = 6
+ asyncTest "root queue should be updated correctly":
+ const credentialCount = 12
let credentials = generateCredentials(manager.rlnInstance, credentialCount)
(await manager.init()).isOkOr:
raiseAssert $error
@@ -496,33 +457,17 @@ suite "Onchain group manager":
try:
manager.onRegister(generateCallback(futures, credentials))
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
for i in 0 ..< credentials.len():
await manager.register(credentials[i], UserMessageLimit(1))
+ discard await manager.updateRoots()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
await allFutures(futures)
- # At this point, we should have a full root queue, 5 roots, and partial buffer of 1 root
check:
- manager.validRoots.len() == credentialCount - 1
- manager.validRootBuffer.len() == 1
-
- # We can now simulate a chain reorg by calling backfillRootQueue
- let expectedLastRoot = manager.validRootBuffer[0]
- try:
- await manager.backfillRootQueue(1)
- except Exception, CatchableError:
- assert false, "exception raised: " & getCurrentExceptionMsg()
-
- # We should now have 5 roots in the queue, and no partial buffer
- check:
- manager.validRoots.len() == credentialCount - 1
- manager.validRootBuffer.len() == 0
- manager.validRoots[credentialCount - 2] == expectedLastRoot
+ manager.validRoots.len() == credentialCount
asyncTest "isReady should return false if ethRpc is none":
(await manager.init()).isOkOr:
@@ -539,25 +484,9 @@ suite "Onchain group manager":
check:
isReady == false
- asyncTest "isReady should return false if lastSeenBlockHead > lastProcessed":
- (await manager.init()).isOkOr:
- raiseAssert $error
-
- var isReady = true
- try:
- isReady = await manager.isReady()
- except Exception, CatchableError:
- assert false, "exception raised: " & getCurrentExceptionMsg()
-
- check:
- isReady == false
-
asyncTest "isReady should return true if ethRpc is ready":
(await manager.init()).isOkOr:
raiseAssert $error
- # node can only be ready after group sync is done
- (await manager.startGroupSync()).isOkOr:
- raiseAssert $error
var isReady = false
try:
diff --git a/tests/waku_rln_relay/test_rln_group_manager_static.nim b/tests/waku_rln_relay/test_rln_group_manager_static.nim
index 56b5e8df1..5d1916f63 100644
--- a/tests/waku_rln_relay/test_rln_group_manager_static.nim
+++ b/tests/waku_rln_relay/test_rln_group_manager_static.nim
@@ -4,7 +4,7 @@
import
testutils/unittests,
- stew/results,
+ results,
options,
waku/[
waku_rln_relay/protocol_types,
diff --git a/tests/waku_rln_relay/test_rln_serde.nim b/tests/waku_rln_relay/test_rln_serde.nim
index 88badce97..1b1d8cd5f 100644
--- a/tests/waku_rln_relay/test_rln_serde.nim
+++ b/tests/waku_rln_relay/test_rln_serde.nim
@@ -2,7 +2,7 @@
{.push raises: [].}
-import stew/results, stint
+import results
import
./rln/waku_rln_relay_utils,
diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim
index 6768cd782..907b7c1b3 100644
--- a/tests/waku_rln_relay/test_waku_rln_relay.nim
+++ b/tests/waku_rln_relay/test_waku_rln_relay.nim
@@ -17,7 +17,6 @@ import
waku_rln_relay/protocol_metrics,
waku_keystore,
],
- ../testlib/common,
./rln/waku_rln_relay_utils
suite "Waku rln relay":
@@ -691,11 +690,11 @@ suite "Waku rln relay":
let index = MembershipIndex(5)
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(index),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
+ dynamic: false,
+ credIndex: some(index),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "waku_rln_relay_2"),
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
@@ -723,13 +722,13 @@ suite "Waku rln relay":
# validate messages
# validateMessage proc checks the validity of the message fields and adds it to the log (if valid)
let
- msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1, some(time))
+ msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1)
# wm2 is published within the same Epoch as wm1 and should be found as spam
- msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2, some(time))
+ msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2)
# a valid message should be validated successfully
- msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3, some(time))
+ msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3)
# wm4 has no rln proof and should not be validated
- msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4, some(time))
+ msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4)
check:
msgValidate1 == MessageValidationResult.Valid
@@ -742,22 +741,22 @@ suite "Waku rln relay":
let index2 = MembershipIndex(6)
let rlnConf1 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(index1),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
+ dynamic: false,
+ credIndex: some(index1),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "waku_rln_relay_3"),
)
let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
let rlnConf2 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(index2),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
+ dynamic: false,
+ credIndex: some(index2),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
@@ -779,9 +778,9 @@ suite "Waku rln relay":
# validate messages
# validateMessage proc checks the validity of the message fields and adds it to the log (if valid)
let
- msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1, some(time))
+ msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1)
# since this message is from a different sender, it should be validated successfully
- msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2, some(time))
+ msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2)
check:
msgValidate1 == MessageValidationResult.Valid
@@ -894,11 +893,11 @@ suite "Waku rln relay":
proc runTestForEpochSizeSec(rlnEpochSizeSec: uint) {.async.} =
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(index),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: rlnEpochSizeSec,
- rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
+ dynamic: false,
+ credIndex: some(index),
+ userMessageLimit: 1,
+ epochSizeSec: rlnEpochSizeSec,
+ treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
index 186343727..3ff6923e0 100644
--- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim
+++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
@@ -25,11 +25,11 @@ proc buildWakuRlnConfig(
let treePath = genTempPath("rln_tree", treeFilename)
# Off-chain
return WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(credIndex.uint),
- rlnRelayUserMessageLimit: userMessageLimit,
- rlnEpochSizeSec: epochSizeSec,
- rlnRelayTreePath: treePath,
+ dynamic: false,
+ credIndex: some(credIndex.uint),
+ userMessageLimit: userMessageLimit,
+ epochSizeSec: epochSizeSec,
+ treePath: treePath,
)
proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async.} =
@@ -58,15 +58,16 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- await node1.mountRelay(@[DefaultRelayShard])
+ (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@@ -74,14 +75,15 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- await node2.mountRelay(@[DefaultRelayShard])
+ (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(2.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
+ dynamic: false,
+ credIndex: some(2.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_2"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
@@ -89,14 +91,15 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
- await node3.mountRelay(@[DefaultRelayShard])
+ (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(3.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
+ dynamic: false,
+ credIndex: some(3.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_3"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
@@ -115,8 +118,14 @@ procSuite "WakuNode - RLN relay":
if topic == DefaultPubsubTopic:
completionFut.complete(true)
- # mount the relay handler
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to unsubscribe from topic: " & $error
+
+ ## Subscribe to the relay topic to add the custom relay handler defined above
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
# prepare the message payload
@@ -126,6 +135,11 @@ procSuite "WakuNode - RLN relay":
var message = WakuMessage(payload: @payload, contentTopic: contentTopic)
doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk())
+ debug "Nodes participating in the test",
+ node1 = shortLog(node1.switch.peerInfo.peerId),
+ node2 = shortLog(node2.switch.peerInfo.peerId),
+ node3 = shortLog(node3.switch.peerInfo.peerId)
+
## node1 publishes a message with a rate limit proof, the message is then relayed to node2 which in turn
## verifies the rate limit proof of the message and relays the message to node3
## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc
@@ -160,11 +174,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
for index, node in nodes:
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(index.uint + 1),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
+ dynamic: false,
+ credIndex: some(index.uint + 1),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -187,9 +201,18 @@ procSuite "WakuNode - RLN relay":
elif topic == $shards[1]:
rxMessagesTopic2 = rxMessagesTopic2 + 1
+ ## This unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[0])).isOkOr:
+ assert false, "Failed to unsubscribe to pubsub topic: " & $error
+ nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[1])).isOkOr:
+ assert false, "Failed to unsubscribe to pubsub topic: " & $error
+
# mount the relay handlers
- nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler))
- nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler))
+ nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
+ nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(1000.millis)
# generate some messages with rln proofs first. generating
@@ -250,15 +273,16 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- await node1.mountRelay(@[DefaultRelayShard])
+ (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_4"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@@ -266,14 +290,15 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- await node2.mountRelay(@[DefaultRelayShard])
+ (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(2.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
+ dynamic: false,
+ credIndex: some(2.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_5"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
@@ -281,14 +306,15 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
- await node3.mountRelay(@[DefaultRelayShard])
+ (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(3.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
+ dynamic: false,
+ credIndex: some(3.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_6"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
@@ -307,8 +333,14 @@ procSuite "WakuNode - RLN relay":
if topic == DefaultPubsubTopic:
completionFut.complete(true)
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to unsubscribe to pubsub topic: " & $error
+
# mount the relay handler
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
# prepare the message payload
@@ -366,15 +398,16 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- await node1.mountRelay(@[DefaultRelayShard])
+ (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_7"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@@ -382,30 +415,32 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- await node2.mountRelay(@[DefaultRelayShard])
+ (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(2.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
+ dynamic: false,
+ credIndex: some(2.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_8"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
await node2.start()
# node 3
- await node3.mountRelay(@[DefaultRelayShard])
+ (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig3 = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(3.uint),
- rlnRelayUserMessageLimit: 1,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
+ dynamic: false,
+ credIndex: some(3.uint),
+ userMessageLimit: 1,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_9"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
@@ -451,13 +486,19 @@ procSuite "WakuNode - RLN relay":
completionFut1.complete(true)
if msg == wm2:
completionFut2.complete(true)
- if msg == wm3:
+ if msg.payload == wm3.payload:
completionFut3.complete(true)
- if msg == wm4:
+ if msg.payload == wm4.payload:
completionFut4.complete(true)
+ ## The following unsubscription is necessary to remove the default relay handler, which is
+ ## added when mountRelay is called.
+ node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to unsubscribe to pubsub topic: " & $error
+
# mount the relay handler for node3
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
## node1 publishes and relays 4 messages to node2
@@ -486,7 +527,8 @@ procSuite "WakuNode - RLN relay":
await node2.stop()
await node3.stop()
- asyncTest "clearNullifierLog: should clear epochs > MaxEpochGap":
+ xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap":
+ ## This is skipped because is flaky and made CI randomly fail but is useful to run manually
# Given two nodes
let
contentTopic = ContentTopic("/waku/2/default-content/proto")
@@ -498,12 +540,15 @@ procSuite "WakuNode - RLN relay":
epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
# Given both nodes mount relay and rlnrelay
- await node1.mountRelay(shardSeq)
+ (await node1.mountRelay(shardSeq)).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
- await node1.mountRlnRelay(wakuRlnConfig1)
+ (await node1.mountRlnRelay(wakuRlnConfig1)).isOkOr:
+ assert false, "Failed to mount rlnrelay"
# Mount rlnrelay in node2 in off-chain mode
- await node2.mountRelay(@[DefaultRelayShard])
+ (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
await node2.mountRlnRelay(wakuRlnConfig2)
@@ -546,7 +591,8 @@ procSuite "WakuNode - RLN relay":
if msg == wm6:
completionFut6.complete(true)
- node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
# Given all messages have an rln proof and are published by the node 1
let publishSleepDuration: Duration = 5000.millis
@@ -636,12 +682,14 @@ procSuite "WakuNode - RLN relay":
# Given both nodes mount relay and rlnrelay
# Mount rlnrelay in node1 in off-chain mode
- await node1.mountRelay(shardSeq)
+ (await node1.mountRelay(shardSeq)).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
await node1.mountRlnRelay(wakuRlnConfig1)
# Mount rlnrelay in node2 in off-chain mode
- await node2.mountRelay(@[DefaultRelayShard])
+ (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
await node2.mountRlnRelay(wakuRlnConfig2)
diff --git a/tests/waku_rln_relay/utils.nim b/tests/waku_rln_relay/utils.nim
index 7dfeffe65..a4247ab44 100644
--- a/tests/waku_rln_relay/utils.nim
+++ b/tests/waku_rln_relay/utils.nim
@@ -1,4 +1,4 @@
-import web3, chronos, options, stint, stew/byteutils
+import web3, chronos, stew/byteutils
proc deployContract*(
web3: Web3, code: string, gasPrice = 0, contractInput = ""
diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim
index 788d6742e..433f865c4 100644
--- a/tests/waku_rln_relay/utils_onchain.nim
+++ b/tests/waku_rln_relay/utils_onchain.nim
@@ -3,8 +3,9 @@
{.push raises: [].}
import
- std/[options, os, osproc, sequtils, deques, streams, strutils, tempfiles, strformat],
- stew/[results, byteutils],
+ std/[options, os, osproc, deques, streams, strutils, tempfiles, strformat],
+ results,
+ stew/byteutils,
testutils/unittests,
chronos,
chronicles,
@@ -249,7 +250,7 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg()
proc setupOnchainGroupManager*(
- ethClientAddress: string = EthClient, amountEth: UInt256 = 10.u256
+ ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256
): Future[OnchainGroupManager] {.async.} =
let rlnInstanceRes =
createRlnInstance(tree_path = genTempPath("rln_tree", "group_manager_onchain"))
@@ -258,9 +259,9 @@ proc setupOnchainGroupManager*(
let rlnInstance = rlnInstanceRes.get()
- let contractAddress = await uploadRLNContract(ethClientAddress)
+ let contractAddress = await uploadRLNContract(ethClientUrl)
# connect to the eth client
- let web3 = await newWeb3(ethClientAddress)
+ let web3 = await newWeb3(ethClientUrl)
let accounts = await web3.provider.eth_accounts()
web3.defaultAccount = accounts[0]
@@ -274,7 +275,7 @@ proc setupOnchainGroupManager*(
)
let manager = OnchainGroupManager(
- ethClientUrl: ethClientAddress,
+ ethClientUrls: @[ethClientUrl],
ethContractAddress: $contractAddress,
chainId: CHAIN_ID,
ethPrivateKey: some($privateKey),
diff --git a/tests/waku_rln_relay/utils_static.nim b/tests/waku_rln_relay/utils_static.nim
index d2a781fcd..8f564beb1 100644
--- a/tests/waku_rln_relay/utils_static.nim
+++ b/tests/waku_rln_relay/utils_static.nim
@@ -5,6 +5,7 @@ import
stew/byteutils,
stew/shims/net as stewNet,
chronos,
+ chronicles,
libp2p/switch,
libp2p/protocols/pubsub/pubsub
@@ -24,10 +25,10 @@ proc setupStaticRln*(
) {.async.} =
await node.mountRlnRelay(
WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(identifier),
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
- rlnEpochSizeSec: 1,
+ dynamic: false,
+ credIndex: some(identifier),
+ treePath: genTempPath("rln_tree", "wakunode_" & $identifier),
+ epochSizeSec: 1,
)
)
@@ -45,7 +46,10 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo
if topic == pubsubTopic:
completionFut.complete(true)
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler)).isOkOr:
+ error "failed to subscribe to relay", topic = pubsubTopic, error = error
+ completionFut.complete(false)
+
return completionFut
proc sendRlnMessage*(
diff --git a/tests/waku_store/store_utils.nim b/tests/waku_store/store_utils.nim
index f652f24b6..779074d7e 100644
--- a/tests/waku_store/store_utils.nim
+++ b/tests/waku_store/store_utils.nim
@@ -1,10 +1,9 @@
{.used.}
-import std/options, chronos, chronicles, libp2p/crypto/crypto
+import std/options, chronos
import
- waku/[node/peer_manager, waku_core, waku_store, waku_store/client],
- ../testlib/[common, wakucore]
+ waku/[node/peer_manager, waku_store, waku_store/client], ../testlib/[common, wakucore]
proc newTestWakuStore*(
switch: Switch, handler: StoreQueryRequestHandler
diff --git a/tests/waku_store/test_client.nim b/tests/waku_store/test_client.nim
index 53e95b83e..38b07bdf4 100644
--- a/tests/waku_store/test_client.nim
+++ b/tests/waku_store/test_client.nim
@@ -1,10 +1,10 @@
{.used.}
-import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[node/peer_manager, waku_core, waku_store, waku_store/client, common/paging],
- ../testlib/[common, wakucore, testasync, futures],
+ ../testlib/[wakucore, testasync, futures],
./store_utils
suite "Store Client":
diff --git a/tests/waku_store/test_waku_store.nim b/tests/waku_store/test_waku_store.nim
index b21c66be0..815b3ac7d 100644
--- a/tests/waku_store/test_waku_store.nim
+++ b/tests/waku_store/test_waku_store.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -12,7 +12,7 @@ import
waku_store/client,
waku_store/common,
],
- ../testlib/[common, wakucore],
+ ../testlib/wakucore,
./store_utils
suite "Waku Store - query handler":
diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim
index 1f48d18f2..1d5e4dcfd 100644
--- a/tests/waku_store/test_wakunode_store.nim
+++ b/tests/waku_store/test_wakunode_store.nim
@@ -18,10 +18,8 @@ import
common/paging,
waku_core,
waku_core/message/digest,
- waku_core/subscription,
node/peer_manager,
waku_archive,
- waku_archive/driver/sqlite_driver,
waku_filter_v2,
waku_filter_v2/client,
waku_store,
diff --git a/tests/waku_store_legacy/store_utils.nim b/tests/waku_store_legacy/store_utils.nim
index cd6236928..a70ca9376 100644
--- a/tests/waku_store_legacy/store_utils.nim
+++ b/tests/waku_store_legacy/store_utils.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, chronos, chronicles, libp2p/crypto/crypto
+import std/options, chronos
import
waku/[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client],
diff --git a/tests/waku_store_legacy/test_client.nim b/tests/waku_store_legacy/test_client.nim
index 9e403dc21..2a8616375 100644
--- a/tests/waku_store_legacy/test_client.nim
+++ b/tests/waku_store_legacy/test_client.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -10,7 +10,7 @@ import
waku_store_legacy/client,
common/paging,
],
- ../testlib/[common, wakucore, testasync, futures],
+ ../testlib/[wakucore, testasync, futures],
./store_utils
suite "Store Client":
diff --git a/tests/waku_store_legacy/test_rpc_codec.nim b/tests/waku_store_legacy/test_rpc_codec.nim
index dae738d01..6897bab41 100644
--- a/tests/waku_store_legacy/test_rpc_codec.nim
+++ b/tests/waku_store_legacy/test_rpc_codec.nim
@@ -9,7 +9,6 @@ import
waku_store_legacy/rpc,
waku_store_legacy/rpc_codec,
],
- ../testlib/common,
../testlib/wakucore
procSuite "Waku Store - RPC codec":
diff --git a/tests/waku_store_legacy/test_waku_store.nim b/tests/waku_store_legacy/test_waku_store.nim
index e5e38b208..b8dc835c8 100644
--- a/tests/waku_store_legacy/test_waku_store.nim
+++ b/tests/waku_store_legacy/test_waku_store.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto
+import testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -10,7 +10,7 @@ import
waku_store_legacy,
waku_store_legacy/client,
],
- ../testlib/[common, wakucore],
+ ../testlib/wakucore,
./store_utils
suite "Waku Store - query handler legacy":
diff --git a/tests/waku_store_legacy/test_wakunode_store.nim b/tests/waku_store_legacy/test_wakunode_store.nim
index 496ab753e..549033e98 100644
--- a/tests/waku_store_legacy/test_wakunode_store.nim
+++ b/tests/waku_store_legacy/test_wakunode_store.nim
@@ -3,7 +3,6 @@
import
std/net,
testutils/unittests,
- chronicles,
chronos,
libp2p/crypto/crypto,
libp2p/peerid,
diff --git a/tests/waku_store_sync/sync_utils.nim b/tests/waku_store_sync/sync_utils.nim
index 20a6bdfb1..e7fd82b57 100644
--- a/tests/waku_store_sync/sync_utils.nim
+++ b/tests/waku_store_sync/sync_utils.nim
@@ -23,7 +23,7 @@ proc randomHash*(rng: var Rand): WakuMessageHash =
proc newTestWakuRecon*(
switch: Switch,
idsRx: AsyncQueue[SyncID],
- wantsTx: AsyncQueue[(PeerId, Fingerprint)],
+ wantsTx: AsyncQueue[PeerId],
needsTx: AsyncQueue[(PeerId, Fingerprint)],
cluster: uint16 = 1,
shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7],
@@ -51,7 +51,7 @@ proc newTestWakuRecon*(
proc newTestWakuTransfer*(
switch: Switch,
idsTx: AsyncQueue[SyncID],
- wantsRx: AsyncQueue[(PeerId, Fingerprint)],
+ wantsRx: AsyncQueue[PeerId],
needsRx: AsyncQueue[(PeerId, Fingerprint)],
): SyncTransfer =
let peerManager = PeerManager.new(switch)
diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim
index f507ad95b..efdd6a885 100644
--- a/tests/waku_store_sync/test_protocol.nim
+++ b/tests/waku_store_sync/test_protocol.nim
@@ -1,11 +1,7 @@
{.used.}
import
- std/[options, sets, random, math],
- testutils/unittests,
- chronos,
- libp2p/crypto/crypto,
- stew/byteutils
+ std/[options, sets, random, math], testutils/unittests, chronos, libp2p/crypto/crypto
import
../../waku/[
@@ -31,7 +27,7 @@ suite "Waku Sync: reconciliation":
var
idsChannel {.threadvar.}: AsyncQueue[SyncID]
- localWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
+ localWants {.threadvar.}: AsyncQueue[PeerId]
remoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
var server {.threadvar.}: SyncReconciliation
@@ -47,7 +43,7 @@ suite "Waku Sync: reconciliation":
await allFutures(serverSwitch.start(), clientSwitch.start())
idsChannel = newAsyncQueue[SyncID]()
- localWants = newAsyncQueue[(PeerId, WakuMessageHash)]()
+ localWants = newAsyncQueue[PeerId]()
remoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]()
server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds)
@@ -65,7 +61,6 @@ suite "Waku Sync: reconciliation":
asyncTest "sync 2 nodes both empty":
check:
idsChannel.len == 0
- localWants.len == 0
remoteNeeds.len == 0
let res = await client.storeSynchronization(some(serverPeerInfo))
@@ -73,7 +68,6 @@ suite "Waku Sync: reconciliation":
check:
idsChannel.len == 0
- localWants.len == 0
remoteNeeds.len == 0
asyncTest "sync 2 nodes empty client full server":
@@ -145,8 +139,6 @@ suite "Waku Sync: reconciliation":
check:
remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false
remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false
- localWants.contains((clientPeerInfo.peerId, hash3)) == false
- localWants.contains((serverPeerInfo.peerId, hash2)) == false
var syncRes = await client.storeSynchronization(some(serverPeerInfo))
assert syncRes.isOk(), $syncRes.error
@@ -154,8 +146,6 @@ suite "Waku Sync: reconciliation":
check:
remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == true
remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == true
- localWants.contains((clientPeerInfo.peerId, hash3)) == true
- localWants.contains((serverPeerInfo.peerId, hash2)) == true
asyncTest "sync 2 nodes different shards":
let
@@ -174,8 +164,6 @@ suite "Waku Sync: reconciliation":
check:
remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false
remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false
- localWants.contains((clientPeerInfo.peerId, hash3)) == false
- localWants.contains((serverPeerInfo.peerId, hash2)) == false
server = await newTestWakuRecon(
serverSwitch, idsChannel, localWants, remoteNeeds, shards = @[0.uint16, 1, 2, 3]
@@ -189,7 +177,6 @@ suite "Waku Sync: reconciliation":
check:
remoteNeeds.len == 0
- localWants.len == 0
asyncTest "sync 2 nodes same hashes":
let
@@ -204,14 +191,12 @@ suite "Waku Sync: reconciliation":
client.messageIngress(hash2, msg2)
check:
- localWants.len == 0
remoteNeeds.len == 0
let res = await client.storeSynchronization(some(serverPeerInfo))
assert res.isOk(), $res.error
check:
- localWants.len == 0
remoteNeeds.len == 0
asyncTest "sync 2 nodes 100K msgs 1 diff":
@@ -240,14 +225,12 @@ suite "Waku Sync: reconciliation":
timestamp += Timestamp(part)
check:
- localWants.contains((serverPeerInfo.peerId, WakuMessageHash(diff))) == false
remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == false
let res = await client.storeSynchronization(some(serverPeerInfo))
assert res.isOk(), $res.error
check:
- localWants.contains((serverPeerInfo.peerId, WakuMessageHash(diff))) == true
remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == true
asyncTest "sync 2 nodes 10K msgs 1K diffs":
@@ -290,7 +273,6 @@ suite "Waku Sync: reconciliation":
continue
check:
- localWants.len == 0
remoteNeeds.len == 0
let res = await client.storeSynchronization(some(serverPeerInfo))
@@ -298,7 +280,6 @@ suite "Waku Sync: reconciliation":
# timimg issue make it hard to match exact numbers
check:
- localWants.len > 900
remoteNeeds.len > 900
suite "Waku Sync: transfer":
@@ -314,10 +295,10 @@ suite "Waku Sync: transfer":
var
serverIds {.threadvar.}: AsyncQueue[SyncID]
- serverLocalWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
+ serverLocalWants {.threadvar.}: AsyncQueue[PeerId]
serverRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
clientIds {.threadvar.}: AsyncQueue[SyncID]
- clientLocalWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
+ clientLocalWants {.threadvar.}: AsyncQueue[PeerId]
clientRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)]
var
@@ -345,7 +326,7 @@ suite "Waku Sync: transfer":
clientPeerManager = PeerManager.new(clientSwitch)
serverIds = newAsyncQueue[SyncID]()
- serverLocalWants = newAsyncQueue[(PeerId, WakuMessageHash)]()
+ serverLocalWants = newAsyncQueue[PeerId]()
serverRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]()
server = SyncTransfer.new(
@@ -357,7 +338,7 @@ suite "Waku Sync: transfer":
)
clientIds = newAsyncQueue[SyncID]()
- clientLocalWants = newAsyncQueue[(PeerId, WakuMessageHash)]()
+ clientLocalWants = newAsyncQueue[PeerId]()
clientRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]()
client = SyncTransfer.new(
@@ -393,8 +374,8 @@ suite "Waku Sync: transfer":
serverDriver = serverDriver.put(DefaultPubsubTopic, msgs)
- # add server info and msg hash to client want channel
- let want = (serverPeerInfo.peerId, hash)
+ # add server info to client want channel
+ let want = serverPeerInfo.peerId
await clientLocalWants.put(want)
# add client info and msg hash to server need channel
diff --git a/tests/waku_store_sync/test_storage.nim b/tests/waku_store_sync/test_storage.nim
index 034eb260e..9e9a80b29 100644
--- a/tests/waku_store_sync/test_storage.nim
+++ b/tests/waku_store_sync/test_storage.nim
@@ -4,7 +4,6 @@ import std/[options, random], testutils/unittests, chronos
import
../../waku/waku_core,
- ../../waku/waku_core/message/digest,
../../waku/waku_store_sync/common,
../../waku/waku_store_sync/storage/seq_storage,
./sync_utils
diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim
index 73ffc8f93..2ee933e3f 100644
--- a/tests/wakunode2/test_app.nim
+++ b/tests/wakunode2/test_app.nim
@@ -9,15 +9,14 @@ import
libp2p/crypto/secp,
libp2p/multiaddress,
libp2p/switch
-import
- ../testlib/common, ../testlib/wakucore, ../testlib/wakunode, waku/node/waku_metrics
+import ../testlib/wakucore, ../testlib/wakunode
-include waku/factory/waku
+include waku/factory/waku, waku/common/enr/typed_record
suite "Wakunode2 - Waku":
test "compilation version should be reported":
## Given
- var conf = defaultTestWakuNodeConf()
+ let conf = defaultTestWakuConf()
let waku = Waku.new(conf).valueOr:
raiseAssert error
@@ -32,7 +31,7 @@ suite "Wakunode2 - Waku":
suite "Wakunode2 - Waku initialization":
test "peer persistence setup should be successfully mounted":
## Given
- var conf = defaultTestWakuNodeConf()
+ var conf = defaultTestWakuConf()
conf.peerPersistence = true
let waku = Waku.new(conf).valueOr:
@@ -43,7 +42,7 @@ suite "Wakunode2 - Waku initialization":
test "node setup is successful with default configuration":
## Given
- var conf = defaultTestWakuNodeConf()
+ var conf = defaultTestWakuConf()
## When
var waku = Waku.new(conf).valueOr:
@@ -52,9 +51,6 @@ suite "Wakunode2 - Waku initialization":
(waitFor startWaku(addr waku)).isOkOr:
raiseAssert error
- waku.metricsServer = waku_metrics.startMetricsServerAndLogging(conf).valueOr:
- raiseAssert error
-
## Then
let node = waku.node
check:
@@ -69,8 +65,8 @@ suite "Wakunode2 - Waku initialization":
test "app properly handles dynamic port configuration":
## Given
- var conf = defaultTestWakuNodeConf()
- conf.tcpPort = Port(0)
+ var conf = defaultTestWakuConf()
+ conf.networkConf.p2pTcpPort = Port(0)
## When
var waku = Waku.new(conf).valueOr:
@@ -82,9 +78,12 @@ suite "Wakunode2 - Waku initialization":
## Then
let
node = waku.node
- typedNodeEnr = node.enr.toTypedRecord()
+ typedNodeEnr = node.enr.toTyped()
assert typedNodeEnr.isOk(), $typedNodeEnr.error
+ let tcpPort = typedNodeEnr.value.tcp()
+ assert tcpPort.isSome()
+ check tcpPort.get() != 0
check:
# Waku started properly
diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim
index fdb0cbc41..a3546f1f8 100644
--- a/tests/wakunode_rest/test_rest_admin.nim
+++ b/tests/wakunode_rest/test_rest_admin.nim
@@ -2,10 +2,10 @@
import
std/[sequtils, strformat, net],
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
+ presto /../ tests/helpers,
libp2p/crypto/crypto
import
@@ -43,10 +43,11 @@ suite "Waku v2 Rest API - Admin":
node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604))
await allFutures(node1.start(), node2.start(), node3.start())
+ let shards = @[RelayShard(clusterId: 1, shardId: 0)]
await allFutures(
- node1.mountRelay(),
- node2.mountRelay(),
- node3.mountRelay(),
+ node1.mountRelay(shards = shards),
+ node2.mountRelay(shards = shards),
+ node3.mountRelay(shards = shards),
node3.mountPeerExchange(),
)
@@ -119,7 +120,9 @@ suite "Waku v2 Rest API - Admin":
check:
getRes.status == 200
$getRes.contentType == $MIMETYPE_JSON
- getRes.data.len() == 0
+ getRes.data.len() == 1
+ getRes.data[0].multiaddr == nonExistentPeer
+ getRes.data[0].connected == CannotConnect
asyncTest "Get filter data":
await allFutures(
@@ -203,3 +206,96 @@ suite "Waku v2 Rest API - Admin":
getRes.data.anyIt(it.origin == Discv5)
# Check peer 3
getRes.data.anyIt(it.origin == PeerExchange)
+
+ asyncTest "get peers by id":
+ # Connect to nodes 2 and 3 using the Admin API
+ let postRes = await client.postPeers(
+ @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)]
+ )
+
+ check:
+ postRes.status == 200
+
+ let getRes = await client.getPeerById($peerInfo2.peerId)
+
+ check:
+ getRes.status == 200
+ $getRes.contentType == $MIMETYPE_JSON
+ getRes.data.protocols.find(WakuRelayCodec) >= 0
+ getRes.data.multiaddr == constructMultiaddrStr(peerInfo2)
+
+  ## nim-presto's RestClient cannot decode a plain-text error body when the
+  ## RestResponse is declared with a complex JSON type
+ # let getRes2 = await client.getPeerById("bad peer id")
+ let getRes2 = await httpClient(
+ restServer.httpServer.address, MethodGet, "/admin/v1/peer/bad+peer+id", ""
+ )
+ check:
+ getRes2.status == 400
+ getRes2.data == "Invalid argument:peerid: incorrect PeerId string"
+
+ asyncTest "get connected peers":
+ # Connect to nodes 2 and 3 using the Admin API
+ let postRes = await client.postPeers(
+ @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)]
+ )
+
+ check:
+ postRes.status == 200
+
+ let getRes = await client.getConnectedPeers()
+
+ check:
+ getRes.status == 200
+ $getRes.contentType == $MIMETYPE_JSON
+ getRes.data.len() == 2
+ # Check peer 2
+ getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2))
+ # Check peer 3
+ getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3))
+
+ # Seems shard info is not available in the peer manager
+ # let getRes2 = await client.getConnectedPeersByShard(0)
+ # check:
+ # getRes2.status == 200
+ # $getRes2.contentType == $MIMETYPE_JSON
+ # getRes2.data.len() == 2
+
+ let getRes3 = await client.getConnectedPeersByShard(99)
+ check:
+ getRes3.status == 200
+ $getRes3.contentType == $MIMETYPE_JSON
+ getRes3.data.len() == 0
+
+ asyncTest "get relay peers":
+ # Connect to nodes 2 and 3 using the Admin API
+ let postRes = await client.postPeers(
+ @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)]
+ )
+
+ check:
+ postRes.status == 200
+
+ let getRes = await client.getRelayPeers()
+
+ check:
+ getRes.status == 200
+ $getRes.contentType == $MIMETYPE_JSON
+ require getRes.data.len() == 1 # Check peer 2
+ check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2))
+ # Check peer 2
+ check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3))
+ # Check peer 3
+
+  # TODO: investigate why the test setup is missing the remote peer's shard info
+ # let getRes2 = await client.getRelayPeersByShard(0)
+ # check:
+ # getRes2.status == 200
+ # $getRes2.contentType == $MIMETYPE_JSON
+ # getRes2.data.peers.len() == 2
+
+ let getRes3 = await client.getRelayPeersByShard(99)
+ check:
+ getRes3.status == 200
+ $getRes3.contentType == $MIMETYPE_JSON
+ getRes3.data.peers.len() == 0
diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim
index 49b05df16..7d29711b1 100644
--- a/tests/wakunode_rest/test_rest_cors.nim
+++ b/tests/wakunode_rest/test_rest_cors.nim
@@ -13,10 +13,7 @@ import
waku_node,
node/waku_node as waku_node2,
waku_api/rest/server,
- waku_api/rest/client,
- waku_api/rest/responses,
waku_api/rest/debug/handlers as debug_api,
- waku_api/rest/debug/client as debug_api_client,
],
../testlib/common,
../testlib/wakucore,
@@ -105,7 +102,8 @@ suite "Waku v2 REST API CORS Handling":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -158,7 +156,8 @@ suite "Waku v2 REST API CORS Handling":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -214,7 +213,8 @@ suite "Waku v2 REST API CORS Handling":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -261,7 +261,8 @@ suite "Waku v2 REST API CORS Handling":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
diff --git a/tests/wakunode_rest/test_rest_debug.nim b/tests/wakunode_rest/test_rest_debug.nim
index f4e66eb20..3129b3544 100644
--- a/tests/wakunode_rest/test_rest_debug.nim
+++ b/tests/wakunode_rest/test_rest_debug.nim
@@ -37,7 +37,8 @@ suite "Waku v2 REST API - Debug":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -66,7 +67,8 @@ suite "Waku v2 REST API - Debug":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
diff --git a/tests/wakunode_rest/test_rest_debug_serdes.nim b/tests/wakunode_rest/test_rest_debug_serdes.nim
index bf007b8d2..13b791dc9 100644
--- a/tests/wakunode_rest/test_rest_debug_serdes.nim
+++ b/tests/wakunode_rest/test_rest_debug_serdes.nim
@@ -1,6 +1,6 @@
{.used.}
-import stew/results, stew/byteutils, testutils/unittests, json_serialization
+import results, stew/byteutils, testutils/unittests, json_serialization
import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types
suite "Waku v2 REST API - Debug - serialization":
diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim
index 60252f92a..556b6b52e 100644
--- a/tests/wakunode_rest/test_rest_filter.nim
+++ b/tests/wakunode_rest/test_rest_filter.nim
@@ -1,7 +1,6 @@
{.used.}
import
- std/os,
chronos/timer,
stew/byteutils,
stew/shims/net,
@@ -55,7 +54,9 @@ proc init(T: type RestFilterTest): Future[T] {.async.} =
await allFutures(testSetup.serviceNode.start(), testSetup.subscriberNode.start())
- await testSetup.serviceNode.mountRelay()
+ (await testSetup.serviceNode.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay: " & $error
+
await testSetup.serviceNode.mountFilter(messageCacheTTL = 1.seconds)
await testSetup.subscriberNode.mountFilterClient()
@@ -279,7 +280,8 @@ suite "Waku v2 Rest API - Filter V2":
subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic)
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
# When
var requestBody = FilterSubscribeRequest(
@@ -324,7 +326,8 @@ suite "Waku v2 Rest API - Filter V2":
# setup filter service and client node
let restFilterTest = await RestFilterTest.init()
let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
let requestBody = FilterSubscribeRequest(
requestId: "1001",
@@ -395,7 +398,8 @@ suite "Waku v2 Rest API - Filter V2":
# setup filter service and client node
let restFilterTest = await RestFilterTest.init()
let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
let requestBody = FilterSubscribeRequest(
requestId: "1001",
diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim
index f3b27906e..3c7c94e87 100644
--- a/tests/wakunode_rest/test_rest_health.nim
+++ b/tests/wakunode_rest/test_rest_health.nim
@@ -23,7 +23,6 @@ import
node/health_monitor,
],
../testlib/common,
- ../testlib/testutils,
../testlib/wakucore,
../testlib/wakunode
@@ -43,7 +42,8 @@ suite "Waku v2 REST API - health":
let node = testWakuNode()
let healthMonitor = WakuNodeHealthMonitor()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
healthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
@@ -69,10 +69,10 @@ suite "Waku v2 REST API - health":
# now kick in rln (currently the only check for health)
await node.mountRlnRelay(
WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode"),
)
)
healthMonitor.setNode(node)
diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim
new file mode 100644
index 000000000..72e309a13
--- /dev/null
+++ b/tests/wakunode_rest/test_rest_lightpush.nim
@@ -0,0 +1,291 @@
+{.used.}
+
+import
+ std/sequtils,
+ stew/byteutils,
+ stew/shims/net,
+ testutils/unittests,
+ presto,
+ presto/client as presto_client,
+ libp2p/crypto/crypto
+
+import
+ waku/[
+ waku_api/message_cache,
+ waku_core,
+ waku_node,
+ node/peer_manager,
+ waku_lightpush/common,
+ waku_api/rest/server,
+ waku_api/rest/client,
+ waku_api/rest/responses,
+ waku_api/rest/lightpush/types,
+ waku_api/rest/lightpush/handlers as lightpush_api,
+ waku_api/rest/lightpush/client as lightpush_api_client,
+ waku_relay,
+ common/rate_limit/setting,
+ ],
+ ../testlib/wakucore,
+ ../testlib/wakunode
+
+proc testWakuNode(): WakuNode =
+ let
+ privkey = generateSecp256k1Key()
+ bindIp = parseIpAddress("0.0.0.0")
+ extIp = parseIpAddress("127.0.0.1")
+ port = Port(0)
+
+ return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port))
+
+type RestLightPushTest = object
+ serviceNode: WakuNode
+ pushNode: WakuNode
+ consumerNode: WakuNode
+ restServer: WakuRestServerRef
+ restClient: RestClientRef
+
+proc init(
+ T: type RestLightPushTest, rateLimit: RateLimitSetting = (0, 0.millis)
+): Future[T] {.async.} =
+ var testSetup = RestLightPushTest()
+ testSetup.serviceNode = testWakuNode()
+ testSetup.pushNode = testWakuNode()
+ testSetup.consumerNode = testWakuNode()
+
+ await allFutures(
+ testSetup.serviceNode.start(),
+ testSetup.pushNode.start(),
+ testSetup.consumerNode.start(),
+ )
+
+ (await testSetup.consumerNode.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay: " & $error
+ (await testSetup.serviceNode.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay: " & $error
+ await testSetup.serviceNode.mountLightPush(rateLimit)
+ testSetup.pushNode.mountLightPushClient()
+
+ testSetup.serviceNode.peerManager.addServicePeer(
+ testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec
+ )
+
+ await testSetup.serviceNode.connectToNodes(
+ @[testSetup.consumerNode.peerInfo.toRemotePeerInfo()]
+ )
+
+ testSetup.pushNode.peerManager.addServicePeer(
+ testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec
+ )
+
+ var restPort = Port(0)
+ let restAddress = parseIpAddress("127.0.0.1")
+ testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet()
+ restPort = testSetup.restServer.httpServer.address.port
+ # update with bound port for restClient use
+
+ installLightPushRequestHandler(testSetup.restServer.router, testSetup.pushNode)
+
+ testSetup.restServer.start()
+
+ testSetup.restClient = newRestHttpClient(initTAddress(restAddress, restPort))
+
+ return testSetup
+
+proc shutdown(self: RestLightPushTest) {.async.} =
+ await self.restServer.stop()
+ await self.restServer.closeWait()
+ await allFutures(
+ self.serviceNode.stop(), self.pushNode.stop(), self.consumerNode.stop()
+ )
+
+suite "Waku v2 Rest API - lightpush":
+ asyncTest "Push message with proof":
+ let restLightPushTest = await RestLightPushTest.init()
+
+ let message: RelayWakuMessage = fakeWakuMessage(
+ contentTopic = DefaultContentTopic,
+ payload = toBytes("TEST-1"),
+ proof = toBytes("proof-test"),
+ )
+ .toRelayWakuMessage()
+
+ check message.proof.isSome()
+
+ let requestBody =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message)
+
+ let response =
+ await restLightPushTest.restClient.sendPushRequest(body = requestBody)
+
+ ## Validate that the push request failed because the node is not
+ ## connected to other node but, doesn't fail because of not properly
+ ## handling the proof message attribute within the REST request.
+ check:
+ response.status == 505
+ response.data.statusDesc == some("No peers for topic, skipping publish")
+ response.data.relayPeerCount == none[uint32]()
+
+ asyncTest "Push message request":
+ # Given
+ let restLightPushTest = await RestLightPushTest.init()
+
+ restLightPushTest.consumerNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic)
+ ).isOkOr:
+ assert false, "Failed to subscribe to relay: " & $error
+
+ restLightPushTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic)
+ ).isOkOr:
+ assert false, "Failed to subscribe to relay: " & $error
+ require:
+ toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
+
+ # When
+ let message: RelayWakuMessage = fakeWakuMessage(
+ contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")
+ )
+ .toRelayWakuMessage()
+
+ let requestBody =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message)
+ let response = await restLightPushTest.restClient.sendPushRequest(requestBody)
+
+ echo "response", $response
+
+ # Then
+ check:
+ response.status == 200
+ response.data.relayPeerCount == some(1.uint32)
+
+ await restLightPushTest.shutdown()
+
+ asyncTest "Push message bad-request":
+ # Given
+ let restLightPushTest = await RestLightPushTest.init()
+
+ restLightPushTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic)
+ ).isOkOr:
+ assert false, "Failed to subscribe to relay: " & $error
+ require:
+ toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
+
+ # When
+ let badMessage1: RelayWakuMessage = fakeWakuMessage(
+ contentTopic = DefaultContentTopic, payload = toBytes("")
+ )
+ .toRelayWakuMessage()
+ let badRequestBody1 =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage1)
+
+ let badMessage2: RelayWakuMessage =
+ fakeWakuMessage(contentTopic = "", payload = toBytes("Sthg")).toRelayWakuMessage()
+ let badRequestBody2 =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage2)
+
+ let badRequestBody3 =
+ PushRequest(pubsubTopic: none(PubsubTopic), message: badMessage2)
+
+ # var response: RestResponse[PushResponse]
+
+ var response = await restLightPushTest.restClient.sendPushRequest(badRequestBody1)
+
+ # Then
+ check:
+ response.status == 400
+ response.data.statusDesc.isSome()
+ response.data.statusDesc.get().startsWith("Invalid push request")
+
+ # when
+ response = await restLightPushTest.restClient.sendPushRequest(badRequestBody2)
+
+ # Then
+ check:
+ response.status == 400
+ response.data.statusDesc.isSome()
+ response.data.statusDesc.get().startsWith("Invalid push request")
+
+ # when
+ response = await restLightPushTest.restClient.sendPushRequest(badRequestBody3)
+
+ # Then
+ check:
+ response.data.statusDesc.isSome()
+ response.data.statusDesc.get().startsWith("Invalid push request")
+
+ await restLightPushTest.shutdown()
+
+ asyncTest "Request rate limit push message":
+ # Given
+ let budgetCap = 3
+ let tokenPeriod = 500.millis
+ let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod))
+
+ restLightPushTest.consumerNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic)
+ ).isOkOr:
+ assert false, "Failed to subscribe to relay: " & $error
+
+ restLightPushTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic)
+ ).isOkOr:
+ assert false, "Failed to subscribe to relay: " & $error
+ require:
+ toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
+
+ # When
+ let pushProc = proc() {.async.} =
+ let message: RelayWakuMessage = fakeWakuMessage(
+ contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")
+ )
+ .toRelayWakuMessage()
+
+ let requestBody =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message)
+ let response = await restLightPushTest.restClient.sendPushRequest(requestBody)
+
+ echo "response", $response
+
+ # Then
+ check:
+ response.status == 200
+ response.data.relayPeerCount == some(1.uint32)
+
+ let pushRejectedProc = proc() {.async.} =
+ let message: RelayWakuMessage = fakeWakuMessage(
+ contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")
+ )
+ .toRelayWakuMessage()
+
+ let requestBody =
+ PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message)
+ let response = await restLightPushTest.restClient.sendPushRequest(requestBody)
+
+ echo "response", $response
+
+ # Then
+ check:
+ response.status == 429
+ response.data.statusDesc.isSome() # Ensure error status description is present
+ response.data.statusDesc.get().startsWith(
+ "Request rejected due to too many requests"
+ ) # Check specific error message
+
+ await pushProc()
+ await pushProc()
+ await pushProc()
+ await pushRejectedProc()
+
+ await sleepAsync(tokenPeriod)
+
+ for runCnt in 0 ..< 3:
+ let startTime = Moment.now()
+ for sendCnt in 0 ..< budgetCap:
+ await pushProc()
+
+ let endTime = Moment.now()
+ let elapsed: Duration = (endTime - startTime)
+ await sleepAsync(tokenPeriod - elapsed + 10.millis)
+
+ await restLightPushTest.shutdown()
diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
index 3490a5f80..e1d6dca30 100644
--- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim
+++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
@@ -26,8 +26,7 @@ import
common/rate_limit/setting,
],
../testlib/wakucore,
- ../testlib/wakunode,
- ../testlib/testutils
+ ../testlib/wakunode
proc testWakuNode(): WakuNode =
let
@@ -59,8 +58,10 @@ proc init(
testSetup.consumerNode.start(),
)
- await testSetup.consumerNode.mountRelay()
- await testSetup.serviceNode.mountRelay()
+ (await testSetup.consumerNode.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
+ (await testSetup.serviceNode.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
await testSetup.serviceNode.mountLegacyLightPush(rateLimit)
testSetup.pushNode.mountLegacyLightPushClient()
@@ -125,10 +126,13 @@ suite "Waku v2 Rest API - lightpush":
restLightPushTest.consumerNode.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic)
- )
+ ).isOkOr:
+ assert false, "Failed to subscribe to topic"
+
restLightPushTest.serviceNode.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic)
- )
+ ).isOkOr:
+ assert false, "Failed to subscribe to topic"
require:
toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
@@ -157,7 +161,8 @@ suite "Waku v2 Rest API - lightpush":
restLightPushTest.serviceNode.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic)
- )
+ ).isOkOr:
+ assert false, "Failed to subscribe to topic"
require:
toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
@@ -217,10 +222,13 @@ suite "Waku v2 Rest API - lightpush":
restLightPushTest.consumerNode.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic)
- )
+ ).isOkOr:
+ assert false, "Failed to subscribe to topic"
+
restLightPushTest.serviceNode.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic)
- )
+ ).isOkOr:
+ assert false, "Failed to subscribe to topic"
require:
toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1
@@ -275,28 +283,3 @@ suite "Waku v2 Rest API - lightpush":
await sleepAsync(tokenPeriod - elapsed + 10.millis)
await restLightPushTest.shutdown()
-
- ## TODO: Re-work this test when lightpush protocol change is done: https://github.com/waku-org/pm/issues/93
- ## This test is similar when no available peer exists for publish. Currently it is returning success,
- ## that makes this test not useful.
- # asyncTest "Push message request service not available":
- # # Given
- # let restLightPushTest = await RestLightPushTest.init()
-
- # # When
- # let message : RelayWakuMessage = fakeWakuMessage(contentTopic = DefaultContentTopic,
- # payload = toBytes("TEST-1")).toRelayWakuMessage()
-
- # let requestBody = PushRequest(pubsubTopic: some("NoExistTopic"),
- # message: message)
- # let response = await restLightPushTest.client.sendPushRequest(requestBody)
-
- # echo "response", $response
-
- # # Then
- # check:
- # response.status == 503
- # $response.contentType == $MIMETYPE_TEXT
- # response.data == "Failed to request a message push: Can not publish to any peers"
-
- # await restLightPushTest.shutdown()
diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim
index 9732d114b..719e66b8a 100644
--- a/tests/wakunode_rest/test_rest_relay.nim
+++ b/tests/wakunode_rest/test_rest_relay.nim
@@ -41,7 +41,8 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -95,7 +96,8 @@ suite "Waku v2 Rest API - Relay":
shard3 = RelayShard(clusterId: DefaultClusterId, shardId: 3)
shard4 = RelayShard(clusterId: DefaultClusterId, shardId: 4)
- await node.mountRelay(@[shard0, shard1, shard2, shard3])
+ (await node.mountRelay(@[shard0, shard1, shard2, shard3, shard4])).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -144,7 +146,8 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -220,13 +223,14 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 20,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 20,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -245,7 +249,8 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic"
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -275,7 +280,8 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
require node.mountSharding(1, 8).isOk
var restPort = Port(0)
@@ -324,11 +330,13 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet()
+ restServer.start()
restPort = restServer.httpServer.address.port # update with bound port for client use
@@ -347,11 +355,18 @@ suite "Waku v2 Rest API - Relay":
cache.contentSubscribe("/waku/2/default-contentY/proto")
installRelayApiHandlers(restServer.router, node, cache)
- restServer.start()
# When
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- let response = await client.relayDeleteAutoSubscriptionsV1(contentTopics)
+
+ var response = await client.relayPostAutoSubscriptionsV1(contentTopics)
+
+ check:
+ response.status == 200
+ $response.contentType == $MIMETYPE_TEXT
+ response.data == "OK"
+
+ response = await client.relayDeleteAutoSubscriptionsV1(contentTopics)
# Then
check:
@@ -373,7 +388,8 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -437,13 +453,14 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 20,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 20,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -461,7 +478,8 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: ContentSub, topic: DefaultContentTopic))
+ node.subscribe((kind: ContentSub, topic: DefaultContentTopic)).isOkOr:
+ assert false, "Failed to subscribe to content topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -489,13 +507,14 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 20,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 20,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -539,13 +558,14 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 20,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 20,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -564,7 +584,8 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -594,13 +615,14 @@ suite "Waku v2 Rest API - Relay":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
- rlnRelayDynamic: false,
- rlnRelayCredIndex: some(1.uint),
- rlnRelayUserMessageLimit: 20,
- rlnEpochSizeSec: 1,
- rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
+ dynamic: false,
+ credIndex: some(1.uint),
+ userMessageLimit: 20,
+ epochSizeSec: 1,
+ treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@@ -619,7 +641,8 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic))
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
diff --git a/tests/wakunode_rest/test_rest_relay_serdes.nim b/tests/wakunode_rest/test_rest_relay_serdes.nim
index 8cc5835f0..086aba22b 100644
--- a/tests/wakunode_rest/test_rest_relay_serdes.nim
+++ b/tests/wakunode_rest/test_rest_relay_serdes.nim
@@ -1,6 +1,6 @@
{.used.}
-import stew/[results, byteutils], chronicles, unittest2, json_serialization
+import results, stew/byteutils, unittest2, json_serialization
import waku/[common/base64, waku_api/rest/serdes, waku_api/rest/relay/types, waku_core]
suite "Waku v2 Rest API - Relay - serialization":
diff --git a/tests/wakunode_rest/test_rest_serdes.nim b/tests/wakunode_rest/test_rest_serdes.nim
index c50bba983..719742bf8 100644
--- a/tests/wakunode_rest/test_rest_serdes.nim
+++ b/tests/wakunode_rest/test_rest_serdes.nim
@@ -1,6 +1,6 @@
{.used.}
-import stew/[results, byteutils], chronicles, unittest2, json_serialization
+import results, stew/byteutils, chronicles, unittest2, json_serialization
import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types
# TODO: Decouple this test suite from the `debug_api` module by defining
diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim
index c31e3939c..d0631bfbf 100644
--- a/tests/wakunode_rest/test_rest_store.nim
+++ b/tests/wakunode_rest/test_rest_store.nim
@@ -86,7 +86,8 @@ procSuite "Waku Rest API - Store v3":
asyncTest "invalid cursor":
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -165,7 +166,8 @@ procSuite "Waku Rest API - Store v3":
asyncTest "Filter by start and end time":
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -330,7 +332,8 @@ procSuite "Waku Rest API - Store v3":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -403,7 +406,8 @@ procSuite "Waku Rest API - Store v3":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -492,7 +496,8 @@ procSuite "Waku Rest API - Store v3":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -548,7 +553,8 @@ procSuite "Waku Rest API - Store v3":
# Given
let node = testWakuNode()
await node.start()
- await node.mountRelay()
+ (await node.mountRelay()).isOkOr:
+ error "failed to mount relay", error = error
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
diff --git a/tools/rln_db_inspector/rln_db_inspector.nim b/tools/rln_db_inspector/rln_db_inspector.nim
index 198f4469d..e1d093e86 100644
--- a/tools/rln_db_inspector/rln_db_inspector.nim
+++ b/tools/rln_db_inspector/rln_db_inspector.nim
@@ -5,13 +5,15 @@ else:
import chronicles, sequtils, results
-import
- waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils, factory/external_config]
+import waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils]
logScope:
topics = "rln_db_inspector"
-proc doInspectRlnDb*(conf: WakuNodeConf) =
+type InspectRlnDbConf* = object
+ treePath*: string
+
+proc doInspectRlnDb*(conf: InspectRlnDbConf) =
# 1. load configuration
trace "configuration", conf = $conf
diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim
index 1bde9ae01..cd501e52d 100644
--- a/tools/rln_keystore_generator/rln_keystore_generator.nim
+++ b/tools/rln_keystore_generator/rln_keystore_generator.nim
@@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
-import chronicles, results, std/tempfiles
+import chronicles, results, std/[tempfiles, sequtils]
import
waku/[
@@ -11,13 +11,22 @@ import
waku_rln_relay/rln,
waku_rln_relay/conversion_utils,
waku_rln_relay/group_manager/on_chain,
- factory/external_config,
]
logScope:
topics = "rln_keystore_generator"
-proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
+type RlnKeystoreGeneratorConf* = object
+ execute*: bool
+ ethContractAddress*: string
+ ethClientUrls*: seq[string]
+ chainId*: uint
+ credPath*: string
+ credPassword*: string
+ userMessageLimit*: uint64
+ ethPrivateKey*: string
+
+proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
# 1. load configuration
trace "configuration", conf = $conf
@@ -56,13 +65,13 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
# 4. initialize OnchainGroupManager
let groupManager = OnchainGroupManager(
- ethClientUrl: string(conf.rlnRelayethClientAddress),
- chainId: conf.rlnRelayChainId,
- ethContractAddress: conf.rlnRelayEthContractAddress,
+ ethClientUrls: conf.ethClientUrls,
+ chainId: conf.chainId,
+ ethContractAddress: conf.ethContractAddress,
rlnInstance: rlnInstance,
keystorePath: none(string),
keystorePassword: none(string),
- ethPrivateKey: some(conf.rlnRelayEthPrivateKey),
+ ethPrivateKey: some(conf.ethPrivateKey),
onFatalErrorAction: onFatalErrorAction,
)
try:
@@ -77,7 +86,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
# 5. register on-chain
try:
- waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit)
+ waitFor groupManager.register(credential, conf.userMessageLimit)
except Exception, CatchableError:
error "failure while registering credentials on-chain",
error = getCurrentExceptionMsg()
@@ -87,28 +96,27 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
info "Your membership has been registered on-chain.",
chainId = $groupManager.chainId,
- contractAddress = conf.rlnRelayEthContractAddress,
+ contractAddress = conf.ethContractAddress,
membershipIndex = groupManager.membershipIndex.get()
- info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit
+ info "Your user message limit is", userMessageLimit = conf.userMessageLimit
# 6. write to keystore
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
- chainId: $groupManager.chainId, address: conf.rlnRelayEthContractAddress
+ chainId: $groupManager.chainId, address: conf.ethContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
- userMessageLimit: conf.rlnRelayUserMessageLimit,
+ userMessageLimit: conf.userMessageLimit,
)
- let persistRes = addMembershipCredentials(
- conf.rlnRelayCredPath, keystoreCred, conf.rlnRelayCredPassword, RLNAppInfo
- )
+ let persistRes =
+ addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo)
if persistRes.isErr():
error "failed to persist credentials", error = persistRes.error
quit(1)
- info "credentials persisted", path = conf.rlnRelayCredPath
+ info "credentials persisted", path = conf.credPath
try:
waitFor groupManager.stop()
diff --git a/vendor/zerokit b/vendor/zerokit
index b9d27039c..ba467d370 160000
--- a/vendor/zerokit
+++ b/vendor/zerokit
@@ -1 +1 @@
-Subproject commit b9d27039c3266af108882d7a8bafc37400d29855
+Subproject commit ba467d370c56b7432522227de22fbd664d44ef3e
diff --git a/waku.nimble b/waku.nimble
index 9c0e819fb..9cf73295f 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -23,6 +23,7 @@ requires "nim >= 2.0.8",
"web3",
"presto",
"regex",
+ "results",
"db_connector",
"minilru",
"quic"
diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim
index 0edb74ede..317cc3003 100644
--- a/waku/common/databases/db_postgres/dbconn.nim
+++ b/waku/common/databases/db_postgres/dbconn.nim
@@ -1,6 +1,5 @@
import
- std/[times, strutils, asyncnet, os, sequtils, sets, strformat],
- regex,
+ std/[times, strutils, os, sets, strformat],
results,
chronos,
chronos/threadsync,
diff --git a/waku/common/rate_limit/request_limiter.nim b/waku/common/rate_limit/request_limiter.nim
index 7f33d0348..0ede20be4 100644
--- a/waku/common/rate_limit/request_limiter.nim
+++ b/waku/common/rate_limit/request_limiter.nim
@@ -78,14 +78,14 @@ template checkUsageLimit*(
bodyWithinLimit, bodyRejected: untyped,
) =
if t.checkUsage(proto, conn):
- let requestStartTime = getTime().toUnixFloat()
+ let requestStartTime = Moment.now()
waku_service_requests.inc(labelValues = [proto, "served"])
bodyWithinLimit
- let requestDurationSec = getTime().toUnixFloat() - requestStartTime
+ let requestDuration = Moment.now() - requestStartTime
waku_service_request_handling_duration_seconds.observe(
- requestDurationSec, labelValues = [proto]
+ requestDuration.milliseconds.float / 1000, labelValues = [proto]
)
else:
waku_service_requests.inc(labelValues = [proto, "rejected"])
diff --git a/waku/common/rate_limit/service_metrics.nim b/waku/common/rate_limit/service_metrics.nim
index 7d24d9530..bff91f622 100644
--- a/waku/common/rate_limit/service_metrics.nim
+++ b/waku/common/rate_limit/service_metrics.nim
@@ -1,8 +1,11 @@
{.push raises: [].}
import std/options
+import chronos/timer
import metrics, setting
+export metrics
+
declarePublicGauge waku_service_requests_limit,
"Applied rate limit of non-relay service", ["service"]
@@ -19,4 +22,9 @@ proc setServiceLimitMetric*(service: string, limit: Option[RateLimitSetting]) =
)
declarePublicHistogram waku_service_request_handling_duration_seconds,
- "duration of non-relay service handling", ["service"]
+ "duration of non-relay service handling",
+ labels = ["service"],
+ buckets = [
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0,
+ 15.0, 20.0, 30.0, Inf,
+ ]
diff --git a/waku/common/rate_limit/single_token_limiter.nim b/waku/common/rate_limit/single_token_limiter.nim
index da01f61bb..50fb2d64c 100644
--- a/waku/common/rate_limit/single_token_limiter.nim
+++ b/waku/common/rate_limit/single_token_limiter.nim
@@ -45,14 +45,14 @@ template checkUsageLimit*(
bodyWithinLimit, bodyRejected: untyped,
) =
if t.checkUsage(proto):
- let requestStartTime = getTime().toUnixFloat()
+ let requestStartTime = Moment.now()
waku_service_requests.inc(labelValues = [proto, "served"])
bodyWithinLimit
- let requestDurationSec = getTime().toUnixFloat() - requestStartTime
+ let requestDuration = Moment.now() - requestStartTime
waku_service_request_handling_duration_seconds.observe(
- requestDurationSec, labelValues = [proto]
+ requestDuration.milliseconds.float / 1000, labelValues = [proto]
)
else:
waku_service_requests.inc(labelValues = [proto, "rejected"])
diff --git a/waku/common/utils/nat.nim b/waku/common/utils/nat.nim
index 698ba68be..125a48935 100644
--- a/waku/common/utils/nat.nim
+++ b/waku/common/utils/nat.nim
@@ -8,13 +8,14 @@ logScope:
## Due to the design of nim-eth/nat module we must ensure it is only initialized once.
## see: https://github.com/waku-org/nwaku/issues/2628
-## Details: nim-eth/nat module starts a meaintenance thread for refreshing the NAT mappings, but everything in the module is global,
+## Details: nim-eth/nat module starts a maintenance thread for refreshing the NAT mappings, but everything in the module is global,
## there is no room to store multiple configurations.
## Exact meaning: redirectPorts cannot be called twice in a program lifetime.
## During waku tests we happen to start several node instances in parallel thus resulting in multiple NAT configurations and multiple threads.
## Those threads will dead lock each other in tear down.
var singletonNat: bool = false
+# TODO: pass `NatStrategy`, not a string
proc setupNat*(
natConf, clientId: string, tcpPort, udpPort: Port
): Result[
diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim
index 91649280a..221acef42 100644
--- a/waku/discovery/waku_discv5.nim
+++ b/waku/discovery/waku_discv5.nim
@@ -10,11 +10,7 @@ import
eth/keys as eth_keys,
eth/p2p/discoveryv5/node,
eth/p2p/discoveryv5/protocol
-import
- ../node/peer_manager/peer_manager,
- ../waku_core,
- ../waku_enr,
- ../factory/external_config
+import ../node/peer_manager/peer_manager, ../waku_core, ../waku_enr
export protocol, waku_enr
@@ -26,6 +22,18 @@ logScope:
## Config
+# TODO: merge both conf
+type Discv5Conf* {.requiresInit.} = object
+ # TODO: This should probably be an option on the builder
+ # But translated to everything else "false" on the config
+ discv5Only*: bool
+ bootstrapNodes*: seq[string]
+ udpPort*: Port
+ tableIpLimit*: uint
+ bucketIpLimit*: uint
+ bitsPerHop*: int
+ enrAutoUpdate*: bool
+
type WakuDiscoveryV5Config* = object
discv5Config*: Option[DiscoveryConfig]
address*: IpAddress
@@ -126,13 +134,13 @@ proc updateENRShards(
): Result[void, string] =
## Add or remove shards from the Discv5 ENR
let newShardOp = topicsToRelayShards(newTopics).valueOr:
- return err("ENR update failed: " & error)
+ return err("ENR update failed topicsToRelayShards: " & error)
let newShard = newShardOp.valueOr:
return ok()
let typedRecord = wd.protocol.localNode.record.toTyped().valueOr:
- return err("ENR update failed: " & $error)
+ return err("ENR update failed toTyped: " & $error)
let currentShardsOp = typedRecord.relaySharding()
@@ -141,17 +149,17 @@ proc updateENRShards(
let currentShard = currentShardsOp.get()
if currentShard.clusterId != newShard.clusterId:
- return err("ENR update failed: clusterId id mismatch")
+ return err("ENR update failed: clusterId id mismatch in add")
RelayShards.init(
currentShard.clusterId, currentShard.shardIds & newShard.shardIds
).valueOr:
- return err("ENR update failed: " & error)
+ return err("ENR update failed RelayShards.init in add: " & error)
elif not add and currentShardsOp.isSome():
let currentShard = currentShardsOp.get()
if currentShard.clusterId != newShard.clusterId:
- return err("ENR update failed: clusterId id mismatch")
+ return err("ENR update failed: clusterId id mismatch in not add")
let currentSet = toHashSet(currentShard.shardIds)
let newSet = toHashSet(newShard.shardIds)
@@ -162,7 +170,7 @@ proc updateENRShards(
return err("ENR update failed: cannot remove all shards")
RelayShards.init(currentShard.clusterId, indices).valueOr:
- return err("ENR update failed: " & error)
+ return err("ENR update failed RelayShards.init in not add: " & error)
elif add and currentShardsOp.isNone():
newShard
else:
@@ -173,12 +181,12 @@ proc updateENRShards(
(ShardingBitVectorEnrField, resultShard.toBitVector())
else:
let list = resultShard.toIndicesList().valueOr:
- return err("ENR update failed: " & $error)
+ return err("ENR update failed toIndicesList: " & $error)
(ShardingIndicesListEnrField, list)
wd.protocol.updateRecord([(field, value)]).isOkOr:
- return err("ENR update failed: " & $error)
+ return err("ENR update failed updateRecord: " & $error)
return ok()
@@ -383,10 +391,12 @@ proc setupDiscoveryV5*(
myENR: enr.Record,
nodePeerManager: PeerManager,
nodeTopicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent],
- conf: WakuNodeConf,
+ conf: Discv5Conf,
dynamicBootstrapNodes: seq[RemotePeerInfo],
rng: ref HmacDrbgContext,
key: crypto.PrivateKey,
+ p2pListenAddress: IpAddress,
+ portsShift: uint16,
): WakuDiscoveryV5 =
let dynamicBootstrapEnrs =
dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get())
@@ -394,7 +404,7 @@ proc setupDiscoveryV5*(
var discv5BootstrapEnrs: seq[enr.Record]
# parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
- for enrUri in conf.discv5BootstrapNodes:
+ for enrUri in conf.bootstrapNodes:
addBootstrapNode(enrUri, discv5BootstrapEnrs)
for enr in discv5BootstrapEnrs:
@@ -407,19 +417,18 @@ proc setupDiscoveryV5*(
discv5BootstrapEnrs.add(dynamicBootstrapEnrs)
- let discv5Config = DiscoveryConfig.init(
- conf.discv5TableIpLimit, conf.discv5BucketIpLimit, conf.discv5BitsPerHop
- )
+ let discv5Config =
+ DiscoveryConfig.init(conf.tableIpLimit, conf.bucketIpLimit, conf.bitsPerHop)
- let discv5UdpPort = Port(uint16(conf.discv5UdpPort) + conf.portsShift)
+ let discv5UdpPort = Port(uint16(conf.udpPort) + portsShift)
let discv5Conf = WakuDiscoveryV5Config(
discv5Config: some(discv5Config),
- address: conf.listenAddress,
+ address: p2pListenAddress,
port: discv5UdpPort,
privateKey: eth_keys.PrivateKey(key.skkey),
bootstrapRecords: discv5BootstrapEnrs,
- autoupdateRecord: conf.discv5EnrAutoUpdate,
+ autoupdateRecord: conf.enrAutoUpdate,
)
WakuDiscoveryV5.new(
diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim
index e896ecdbb..d1cede969 100644
--- a/waku/factory/builder.nim
+++ b/waku/factory/builder.nim
@@ -8,7 +8,6 @@ import
libp2p/builders,
libp2p/nameresolving/nameresolver,
libp2p/transports/wstransport,
- libp2p/protocols/connectivity/relay/client,
libp2p/protocols/connectivity/relay/relay
import
../waku_enr,
@@ -90,7 +89,7 @@ proc withNetworkConfigurationDetails*(
extIp = extIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
- wsBindPort = wsBindPort,
+ wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,
@@ -197,6 +196,8 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
except CatchableError:
return err("failed to create switch: " & getCurrentExceptionMsg())
+ let netConfig = builder.netConfig.get()
+
let peerManager = PeerManager.new(
switch = switch,
storage = builder.peerStorage.get(nil),
@@ -204,12 +205,13 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
maxServicePeers = some(builder.maxServicePeers),
colocationLimit = builder.colocationLimit,
shardedPeerManagement = builder.shardAware,
+ dnsNameServers = netConfig.dnsNameServers,
)
var node: WakuNode
try:
node = WakuNode.new(
- netConfig = builder.netConfig.get(),
+ netConfig = netConfig,
enr = builder.record.get(),
switch = switch,
peerManager = peerManager,
diff --git a/waku/factory/conf_builder/conf_builder.nim b/waku/factory/conf_builder/conf_builder.nim
new file mode 100644
index 000000000..9b7f44ada
--- /dev/null
+++ b/waku/factory/conf_builder/conf_builder.nim
@@ -0,0 +1,17 @@
+import
+ ./waku_conf_builder,
+ ./filter_service_conf_builder,
+ ./store_sync_conf_builder,
+ ./store_service_conf_builder,
+ ./rest_server_conf_builder,
+ ./dns_discovery_conf_builder,
+ ./discv5_conf_builder,
+ ./web_socket_conf_builder,
+ ./metrics_server_conf_builder,
+ ./rln_relay_conf_builder
+
+export
+ waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
+ store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
+ discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
+ rln_relay_conf_builder
diff --git a/waku/factory/conf_builder/discv5_conf_builder.nim b/waku/factory/conf_builder/discv5_conf_builder.nim
new file mode 100644
index 000000000..950b2a4f6
--- /dev/null
+++ b/waku/factory/conf_builder/discv5_conf_builder.nim
@@ -0,0 +1,65 @@
+import chronicles, std/[net, options, sequtils], results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder discv5"
+
+###########################
+## Discv5 Config Builder ##
+###########################
+type Discv5ConfBuilder* = object
+ enabled*: Option[bool]
+
+ bootstrapNodes*: seq[string]
+ bitsPerHop*: Option[int]
+ bucketIpLimit*: Option[uint]
+ discv5Only*: Option[bool]
+ enrAutoUpdate*: Option[bool]
+ tableIpLimit*: Option[uint]
+ udpPort*: Option[Port]
+
+proc init*(T: type Discv5ConfBuilder): Discv5ConfBuilder =
+ Discv5ConfBuilder()
+
+proc withEnabled*(b: var Discv5ConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) =
+ b.bitsPerHop = some(bitsPerHop)
+
+proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) =
+ b.bucketIpLimit = some(bucketIpLimit)
+
+proc withDiscv5Only*(b: var Discv5ConfBuilder, discv5Only: bool) =
+ b.discv5Only = some(discv5Only)
+
+proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) =
+ b.enrAutoUpdate = some(enrAutoUpdate)
+
+proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) =
+ b.tableIpLimit = some(tableIpLimit)
+
+proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) =
+ b.udpPort = some(udpPort)
+
+proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) =
+ # TODO: validate ENRs?
+ b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes)
+
+proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] =
+ if not b.enabled.get(false):
+ return ok(none(Discv5Conf))
+
+ return ok(
+ some(
+ Discv5Conf(
+ bootstrapNodes: b.bootstrapNodes,
+ bitsPerHop: b.bitsPerHop.get(1),
+ bucketIpLimit: b.bucketIpLimit.get(2),
+ discv5Only: b.discv5Only.get(false),
+ enrAutoUpdate: b.enrAutoUpdate.get(true),
+ tableIpLimit: b.tableIpLimit.get(10),
+ udpPort: b.udpPort.get(9000.Port),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
new file mode 100644
index 000000000..8ac33a18f
--- /dev/null
+++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
@@ -0,0 +1,38 @@
+import chronicles, std/[net, options, sequtils], results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder dns discovery"
+
+##################################
+## DNS Discovery Config Builder ##
+##################################
+type DnsDiscoveryConfBuilder* = object
+ enabled*: Option[bool]
+ enrTreeUrl*: Option[string]
+ nameServers*: seq[IpAddress]
+
+proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder =
+ DnsDiscoveryConfBuilder()
+
+proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
+ b.enrTreeUrl = some(enrTreeUrl)
+
+proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) =
+ b.nameServers = concat(b.nameServers, nameServers)
+
+proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(DnsDiscoveryConf))
+
+ if b.nameServers.len == 0:
+ return err("dnsDiscovery.nameServers is not specified")
+ if b.enrTreeUrl.isNone():
+ return err("dnsDiscovery.enrTreeUrl is not specified")
+
+ return ok(
+ some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get()))
+ )
diff --git a/waku/factory/conf_builder/filter_service_conf_builder.nim b/waku/factory/conf_builder/filter_service_conf_builder.nim
new file mode 100644
index 000000000..a3f056b01
--- /dev/null
+++ b/waku/factory/conf_builder/filter_service_conf_builder.nim
@@ -0,0 +1,45 @@
+import chronicles, std/options, results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder filter service"
+
+###################################
+## Filter Service Config Builder ##
+###################################
+type FilterServiceConfBuilder* = object
+ enabled*: Option[bool]
+ maxPeersToServe*: Option[uint32]
+ subscriptionTimeout*: Option[uint16]
+ maxCriteria*: Option[uint32]
+
+proc init*(T: type FilterServiceConfBuilder): FilterServiceConfBuilder =
+ FilterServiceConfBuilder()
+
+proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) =
+ b.maxPeersToServe = some(maxPeersToServe)
+
+proc withSubscriptionTimeout*(
+ b: var FilterServiceConfBuilder, subscriptionTimeout: uint16
+) =
+ b.subscriptionTimeout = some(subscriptionTimeout)
+
+proc withMaxCriteria*(b: var FilterServiceConfBuilder, maxCriteria: uint32) =
+ b.maxCriteria = some(maxCriteria)
+
+proc build*(b: FilterServiceConfBuilder): Result[Option[FilterServiceConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(FilterServiceConf))
+
+ return ok(
+ some(
+ FilterServiceConf(
+ maxPeersToServe: b.maxPeersToServe.get(500),
+ subscriptionTimeout: b.subscriptionTimeout.get(300),
+ maxCriteria: b.maxCriteria.get(1000),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/metrics_server_conf_builder.nim b/waku/factory/conf_builder/metrics_server_conf_builder.nim
new file mode 100644
index 000000000..0f0d18564
--- /dev/null
+++ b/waku/factory/conf_builder/metrics_server_conf_builder.nim
@@ -0,0 +1,47 @@
+import chronicles, std/[net, options], results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder metrics server"
+
+###################################
+## Metrics Server Config Builder ##
+###################################
+type MetricsServerConfBuilder* = object
+ enabled*: Option[bool]
+
+ httpAddress*: Option[IpAddress]
+ httpPort*: Option[Port]
+ logging*: Option[bool]
+
+proc init*(T: type MetricsServerConfBuilder): MetricsServerConfBuilder =
+ MetricsServerConfBuilder()
+
+proc withEnabled*(b: var MetricsServerConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withHttpAddress*(b: var MetricsServerConfBuilder, httpAddress: IpAddress) =
+ b.httpAddress = some(httpAddress)
+
+proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: Port) =
+ b.httpPort = some(httpPort)
+
+proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: uint16) =
+ b.httpPort = some(Port(httpPort))
+
+proc withLogging*(b: var MetricsServerConfBuilder, logging: bool) =
+ b.logging = some(logging)
+
+proc build*(b: MetricsServerConfBuilder): Result[Option[MetricsServerConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(MetricsServerConf))
+
+ return ok(
+ some(
+ MetricsServerConf(
+ httpAddress: b.httpAddress.get(static parseIpAddress("127.0.0.1")),
+ httpPort: b.httpPort.get(8008.Port),
+ logging: b.logging.get(false),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/rest_server_conf_builder.nim b/waku/factory/conf_builder/rest_server_conf_builder.nim
new file mode 100644
index 000000000..2efd91f02
--- /dev/null
+++ b/waku/factory/conf_builder/rest_server_conf_builder.nim
@@ -0,0 +1,64 @@
+import chronicles, std/[net, options, sequtils], results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder rest server"
+
+################################
+## REST Server Config Builder ##
+################################
+type RestServerConfBuilder* = object
+ enabled*: Option[bool]
+
+ allowOrigin*: seq[string]
+ listenAddress*: Option[IpAddress]
+ port*: Option[Port]
+ admin*: Option[bool]
+ relayCacheCapacity*: Option[uint32]
+
+proc init*(T: type RestServerConfBuilder): RestServerConfBuilder =
+ RestServerConfBuilder()
+
+proc withEnabled*(b: var RestServerConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withAllowOrigin*(b: var RestServerConfBuilder, allowOrigin: seq[string]) =
+ b.allowOrigin = concat(b.allowOrigin, allowOrigin)
+
+proc withListenAddress*(b: var RestServerConfBuilder, listenAddress: IpAddress) =
+ b.listenAddress = some(listenAddress)
+
+proc withPort*(b: var RestServerConfBuilder, port: Port) =
+ b.port = some(port)
+
+proc withPort*(b: var RestServerConfBuilder, port: uint16) =
+ b.port = some(Port(port))
+
+proc withAdmin*(b: var RestServerConfBuilder, admin: bool) =
+ b.admin = some(admin)
+
+proc withRelayCacheCapacity*(b: var RestServerConfBuilder, relayCacheCapacity: uint32) =
+ b.relayCacheCapacity = some(relayCacheCapacity)
+
+proc build*(b: RestServerConfBuilder): Result[Option[RestServerConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(RestServerConf))
+
+ if b.listenAddress.isNone():
+ return err("restServer.listenAddress is not specified")
+ if b.port.isNone():
+ return err("restServer.port is not specified")
+ if b.relayCacheCapacity.isNone():
+ return err("restServer.relayCacheCapacity is not specified")
+
+ return ok(
+ some(
+ RestServerConf(
+ allowOrigin: b.allowOrigin,
+ listenAddress: b.listenAddress.get(),
+ port: b.port.get(),
+ admin: b.admin.get(false),
+ relayCacheCapacity: b.relayCacheCapacity.get(),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/rln_relay_conf_builder.nim b/waku/factory/conf_builder/rln_relay_conf_builder.nim
new file mode 100644
index 000000000..ea87eb278
--- /dev/null
+++ b/waku/factory/conf_builder/rln_relay_conf_builder.nim
@@ -0,0 +1,103 @@
+import chronicles, std/options, results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder rln relay"
+
+##############################
+## RLN Relay Config Builder ##
+##############################
+type RlnRelayConfBuilder* = object
+ enabled*: Option[bool]
+ chainId*: Option[uint]
+ ethClientUrls*: Option[seq[string]]
+ ethContractAddress*: Option[string]
+ credIndex*: Option[uint]
+ credPassword*: Option[string]
+ credPath*: Option[string]
+ dynamic*: Option[bool]
+ epochSizeSec*: Option[uint64]
+ userMessageLimit*: Option[uint64]
+ treePath*: Option[string]
+
+proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
+ RlnRelayConfBuilder()
+
+proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) =
+ b.chainId = some(chainId)
+
+proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
+ b.credIndex = some(credIndex)
+
+proc withCredPassword*(b: var RlnRelayConfBuilder, credPassword: string) =
+ b.credPassword = some(credPassword)
+
+proc withCredPath*(b: var RlnRelayConfBuilder, credPath: string) =
+ b.credPath = some(credPath)
+
+proc withDynamic*(b: var RlnRelayConfBuilder, dynamic: bool) =
+ b.dynamic = some(dynamic)
+
+proc withEthClientUrls*(b: var RlnRelayConfBuilder, ethClientUrls: seq[string]) =
+ b.ethClientUrls = some(ethClientUrls)
+
+proc withEthContractAddress*(b: var RlnRelayConfBuilder, ethContractAddress: string) =
+ b.ethContractAddress = some(ethContractAddress)
+
+proc withEpochSizeSec*(b: var RlnRelayConfBuilder, epochSizeSec: uint64) =
+ b.epochSizeSec = some(epochSizeSec)
+
+proc withUserMessageLimit*(b: var RlnRelayConfBuilder, userMessageLimit: uint64) =
+ b.userMessageLimit = some(userMessageLimit)
+
+proc withTreePath*(b: var RlnRelayConfBuilder, treePath: string) =
+ b.treePath = some(treePath)
+
+proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(RlnRelayConf))
+
+ if b.chainId.isNone():
+ return err("RLN Relay Chain Id is not specified")
+
+ let creds =
+ if b.credPath.isSome() and b.credPassword.isSome():
+ some(RlnRelayCreds(path: b.credPath.get(), password: b.credPassword.get()))
+ elif b.credPath.isSome() and b.credPassword.isNone():
+ return err("RLN Relay Credential Password is not specified but path is")
+ elif b.credPath.isNone() and b.credPassword.isSome():
+ return err("RLN Relay Credential Path is not specified but password is")
+ else:
+ none(RlnRelayCreds)
+
+ if b.dynamic.isNone():
+ return err("rlnRelay.dynamic is not specified")
+ if b.ethClientUrls.get(newSeq[string](0)).len == 0:
+ return err("rlnRelay.ethClientUrls is not specified")
+ if b.ethContractAddress.get("") == "":
+ return err("rlnRelay.ethContractAddress is not specified")
+ if b.epochSizeSec.isNone():
+ return err("rlnRelay.epochSizeSec is not specified")
+ if b.userMessageLimit.isNone():
+ return err("rlnRelay.userMessageLimit is not specified")
+ if b.treePath.isNone():
+ return err("rlnRelay.treePath is not specified")
+
+ return ok(
+ some(
+ RlnRelayConf(
+ chainId: b.chainId.get(),
+ credIndex: b.credIndex,
+ creds: creds,
+ dynamic: b.dynamic.get(),
+ ethClientUrls: b.ethClientUrls.get(),
+ ethContractAddress: b.ethContractAddress.get(),
+ epochSizeSec: b.epochSizeSec.get(),
+ userMessageLimit: b.userMessageLimit.get(),
+ treePath: b.treePath.get(),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/store_service_conf_builder.nim b/waku/factory/conf_builder/store_service_conf_builder.nim
new file mode 100644
index 000000000..d12bc8150
--- /dev/null
+++ b/waku/factory/conf_builder/store_service_conf_builder.nim
@@ -0,0 +1,74 @@
+import chronicles, std/options, results, chronos
+import ../waku_conf, ./store_sync_conf_builder
+
+logScope:
+ topics = "waku conf builder store service"
+
+##################################
+## Store Service Config Builder ##
+##################################
+type StoreServiceConfBuilder* = object
+ enabled*: Option[bool]
+
+ dbMigration*: Option[bool]
+ dbURl*: Option[string]
+ dbVacuum*: Option[bool]
+ supportV2*: Option[bool]
+ maxNumDbConnections*: Option[int]
+ retentionPolicy*: Option[string]
+ resume*: Option[bool]
+ storeSyncConf*: StoreSyncConfBuilder
+
+proc init*(T: type StoreServiceConfBuilder): StoreServiceConfBuilder =
+ StoreServiceConfBuilder(storeSyncConf: StoreSyncConfBuilder.init())
+
+proc withEnabled*(b: var StoreServiceConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withDbMigration*(b: var StoreServiceConfBuilder, dbMigration: bool) =
+ b.dbMigration = some(dbMigration)
+
+proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) =
+ b.dbURl = some(dbUrl)
+
+proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) =
+ b.dbVacuum = some(dbVacuum)
+
+proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) =
+ b.supportV2 = some(supportV2)
+
+proc withMaxNumDbConnections*(
+ b: var StoreServiceConfBuilder, maxNumDbConnections: int
+) =
+ b.maxNumDbConnections = some(maxNumDbConnections)
+
+proc withRetentionPolicy*(b: var StoreServiceConfBuilder, retentionPolicy: string) =
+ b.retentionPolicy = some(retentionPolicy)
+
+proc withResume*(b: var StoreServiceConfBuilder, resume: bool) =
+ b.resume = some(resume)
+
+proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(StoreServiceConf))
+
+ if b.dbUrl.get("") == "":
+ return err "store.dbUrl is not specified"
+
+ let storeSyncConf = b.storeSyncConf.build().valueOr:
+ return err("Store Sync Conf failed to build")
+
+ return ok(
+ some(
+ StoreServiceConf(
+ dbMigration: b.dbMigration.get(true),
+ dbURl: b.dbUrl.get(),
+ dbVacuum: b.dbVacuum.get(false),
+ supportV2: b.supportV2.get(true),
+ maxNumDbConnections: b.maxNumDbConnections.get(50),
+ retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
+ resume: b.resume.get(false),
+ storeSyncConf: storeSyncConf,
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/store_sync_conf_builder.nim b/waku/factory/conf_builder/store_sync_conf_builder.nim
new file mode 100644
index 000000000..4c7177b71
--- /dev/null
+++ b/waku/factory/conf_builder/store_sync_conf_builder.nim
@@ -0,0 +1,51 @@
+import chronicles, std/options, results
+import ../waku_conf
+
+logScope:
+ topics = "waku conf builder store sync"
+
+##################################
+## Store Sync Config Builder ##
+##################################
+type StoreSyncConfBuilder* = object
+ enabled*: Option[bool]
+
+ rangeSec*: Option[uint32]
+ intervalSec*: Option[uint32]
+ relayJitterSec*: Option[uint32]
+
+proc init*(T: type StoreSyncConfBuilder): StoreSyncConfBuilder =
+ StoreSyncConfBuilder()
+
+proc withEnabled*(b: var StoreSyncConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withRangeSec*(b: var StoreSyncConfBuilder, rangeSec: uint32) =
+ b.rangeSec = some(rangeSec)
+
+proc withIntervalSec*(b: var StoreSyncConfBuilder, intervalSec: uint32) =
+ b.intervalSec = some(intervalSec)
+
+proc withRelayJitterSec*(b: var StoreSyncConfBuilder, relayJitterSec: uint32) =
+ b.relayJitterSec = some(relayJitterSec)
+
+proc build*(b: StoreSyncConfBuilder): Result[Option[StoreSyncConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(StoreSyncConf))
+
+ if b.rangeSec.isNone():
+ return err "store.rangeSec is not specified"
+ if b.intervalSec.isNone():
+ return err "store.intervalSec is not specified"
+ if b.relayJitterSec.isNone():
+ return err "store.relayJitterSec is not specified"
+
+ return ok(
+ some(
+ StoreSyncConf(
+ rangeSec: b.rangeSec.get(),
+ intervalSec: b.intervalSec.get(),
+ relayJitterSec: b.relayJitterSec.get(),
+ )
+ )
+ )
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
new file mode 100644
index 000000000..44cb706af
--- /dev/null
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -0,0 +1,649 @@
+import
+ libp2p/crypto/crypto,
+ libp2p/multiaddress,
+ std/[net, options, sequtils, strutils],
+ chronicles,
+ chronos,
+ results
+
+import
+ ../waku_conf,
+ ../networks_config,
+ ../../common/logging,
+ ../../common/utils/parse_size_units,
+ ../../waku_enr/capabilities
+
+import
+ ./filter_service_conf_builder,
+ ./store_sync_conf_builder,
+ ./store_service_conf_builder,
+ ./rest_server_conf_builder,
+ ./dns_discovery_conf_builder,
+ ./discv5_conf_builder,
+ ./web_socket_conf_builder,
+ ./metrics_server_conf_builder,
+ ./rln_relay_conf_builder
+
+logScope:
+ topics = "waku conf builder"
+
+type MaxMessageSizeKind* = enum
+ mmskNone
+ mmskStr
+ mmskInt
+
+type MaxMessageSize* = object
+ case kind*: MaxMessageSizeKind
+ of mmskNone:
+ discard
+ of mmskStr:
+ str*: string
+ of mmskInt:
+ bytes*: uint64
+
+## `WakuConfBuilder` is a convenient tool to accumulate
+## Config parameters to build a `WakuConfig`.
+## It provides some type conversion, as well as applying
+## defaults in an agnostic manner (for any usage of Waku node)
+#
+# TODO: Sub protocol builder (eg `StoreServiceConfBuilder`
+# is be better defined in the protocol module (eg store)
+# and apply good defaults from this protocol PoV and make the
+# decision when the dev must specify a value vs when a default
+# is fine to have.
+#
+# TODO: Add default to most values so that when a developer uses
+# the builder, it works out-of-the-box
+type WakuConfBuilder* = object
+ nodeKey: Option[crypto.PrivateKey]
+
+ clusterId: Option[uint16]
+ numShardsInNetwork: Option[uint32]
+ shards: Option[seq[uint16]]
+ protectedShards: Option[seq[ProtectedShard]]
+ contentTopics: Option[seq[string]]
+
+ # Conf builders
+ dnsDiscoveryConf*: DnsDiscoveryConfBuilder
+ discv5Conf*: Discv5ConfBuilder
+ filterServiceConf*: FilterServiceConfBuilder
+ metricsServerConf*: MetricsServerConfBuilder
+ restServerConf*: RestServerConfBuilder
+ rlnRelayConf*: RlnRelayConfBuilder
+ storeServiceConf*: StoreServiceConfBuilder
+ webSocketConf*: WebSocketConfBuilder
+ # End conf builders
+ relay: Option[bool]
+ lightPush: Option[bool]
+ peerExchange: Option[bool]
+ storeSync: Option[bool]
+ relayPeerExchange: Option[bool]
+
+ # TODO: move within a relayConf
+ rendezvous: Option[bool]
+ discv5Only: Option[bool]
+
+ clusterConf: Option[ClusterConf]
+
+ staticNodes: seq[string]
+
+ remoteStoreNode: Option[string]
+ remoteLightPushNode: Option[string]
+ remoteFilterNode: Option[string]
+ remotePeerExchangeNode: Option[string]
+
+ maxMessageSize: MaxMessageSize
+
+ logLevel: Option[logging.LogLevel]
+ logFormat: Option[logging.LogFormat]
+
+ natStrategy: Option[string]
+
+ p2pTcpPort: Option[Port]
+ p2pListenAddress: Option[IpAddress]
+ portsShift: Option[uint16]
+ dns4DomainName: Option[string]
+ extMultiAddrs: seq[string]
+ extMultiAddrsOnly: Option[bool]
+
+ dnsAddrs: Option[bool]
+ dnsAddrsNameServers: seq[IpAddress]
+
+ peerPersistence: Option[bool]
+ peerStoreCapacity: Option[int]
+ maxConnections: Option[int]
+ colocationLimit: Option[int]
+
+ agentString: Option[string]
+
+ rateLimits: Option[seq[string]]
+
+ maxRelayPeers: Option[int]
+ relayShardedPeerManagement: Option[bool]
+ relayServiceRatio: Option[string]
+ circuitRelayClient: Option[bool]
+ keepAlive: Option[bool]
+ p2pReliability: Option[bool]
+
+proc init*(T: type WakuConfBuilder): WakuConfBuilder =
+ WakuConfBuilder(
+ dnsDiscoveryConf: DnsDiscoveryConfBuilder.init(),
+ discv5Conf: Discv5ConfBuilder.init(),
+ filterServiceConf: FilterServiceConfBuilder.init(),
+ metricsServerConf: MetricsServerConfBuilder.init(),
+ restServerConf: RestServerConfBuilder.init(),
+ rlnRelayConf: RlnRelayConfBuilder.init(),
+ storeServiceConf: StoreServiceConfBuilder.init(),
+ webSocketConf: WebSocketConfBuilder.init(),
+ )
+
+proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) =
+ b.clusterConf = some(clusterConf)
+
+proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
+ b.nodeKey = some(nodeKey)
+
+proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
+ b.clusterId = some(clusterId)
+
+proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) =
+ b.numShardsInNetwork = some(numShardsInNetwork)
+
+proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
+ b.shards = some(shards)
+
+proc withProtectedShards*(
+ b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
+) =
+ b.protectedShards = some(protectedShards)
+
+proc withContentTopics*(b: var WakuConfBuilder, contentTopics: seq[string]) =
+ b.contentTopics = some(contentTopics)
+
+proc withRelay*(b: var WakuConfBuilder, relay: bool) =
+ b.relay = some(relay)
+
+proc withLightPush*(b: var WakuConfBuilder, lightPush: bool) =
+ b.lightPush = some(lightPush)
+
+proc withStoreSync*(b: var WakuConfBuilder, storeSync: bool) =
+ b.storeSync = some(storeSync)
+
+proc withPeerExchange*(b: var WakuConfBuilder, peerExchange: bool) =
+ b.peerExchange = some(peerExchange)
+
+proc withRelayPeerExchange*(b: var WakuConfBuilder, relayPeerExchange: bool) =
+ b.relayPeerExchange = some(relayPeerExchange)
+
+proc withRendezvous*(b: var WakuConfBuilder, rendezvous: bool) =
+ b.rendezvous = some(rendezvous)
+
+proc withRemoteStoreNode*(b: var WakuConfBuilder, remoteStoreNode: string) =
+ b.remoteStoreNode = some(remoteStoreNode)
+
+proc withRemoteLightPushNode*(b: var WakuConfBuilder, remoteLightPushNode: string) =
+ b.remoteLightPushNode = some(remoteLightPushNode)
+
+proc withRemoteFilterNode*(b: var WakuConfBuilder, remoteFilterNode: string) =
+ b.remoteFilterNode = some(remoteFilterNode)
+
+proc withRemotePeerExchangeNode*(
+ b: var WakuConfBuilder, remotePeerExchangeNode: string
+) =
+ b.remotePeerExchangeNode = some(remotePeerExchangeNode)
+
+proc withDnsAddrs*(b: var WakuConfBuilder, dnsAddrs: bool) =
+ b.dnsAddrs = some(dnsAddrs)
+
+proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) =
+ b.peerPersistence = some(peerPersistence)
+
+proc withPeerStoreCapacity*(b: var WakuConfBuilder, peerStoreCapacity: int) =
+ b.peerStoreCapacity = some(peerStoreCapacity)
+
+proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) =
+ b.maxConnections = some(maxConnections)
+
+proc withDnsAddrsNameServers*(
+ b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress]
+) =
+ b.dnsAddrsNameServers = concat(b.dnsAddrsNameServers, dnsAddrsNameServers)
+
+proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) =
+ b.logLevel = some(logLevel)
+
+proc withLogFormat*(b: var WakuConfBuilder, logFormat: logging.LogFormat) =
+ b.logFormat = some(logFormat)
+
+proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: Port) =
+ b.p2pTcpPort = some(p2pTcpPort)
+
+proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: uint16) =
+ b.p2pTcpPort = some(Port(p2pTcpPort))
+
+proc withPortsShift*(b: var WakuConfBuilder, portsShift: uint16) =
+ b.portsShift = some(portsShift)
+
+proc withP2pListenAddress*(b: var WakuConfBuilder, p2pListenAddress: IpAddress) =
+ b.p2pListenAddress = some(p2pListenAddress)
+
+proc withExtMultiAddrsOnly*(b: var WakuConfBuilder, extMultiAddrsOnly: bool) =
+ b.extMultiAddrsOnly = some(extMultiAddrsOnly)
+
+proc withDns4DomainName*(b: var WakuConfBuilder, dns4DomainName: string) =
+ b.dns4DomainName = some(dns4DomainName)
+
+proc withNatStrategy*(b: var WakuConfBuilder, natStrategy: string) =
+ b.natStrategy = some(natStrategy)
+
+proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
+ b.agentString = some(agentString)
+
+proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
+ b.colocationLimit = some(colocationLimit)
+
+proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) =
+ b.rateLimits = some(rateLimits)
+
+proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
+ b.maxRelayPeers = some(maxRelayPeers)
+
+proc withRelayServiceRatio*(b: var WakuConfBuilder, relayServiceRatio: string) =
+ b.relayServiceRatio = some(relayServiceRatio)
+
+proc withCircuitRelayClient*(b: var WakuConfBuilder, circuitRelayClient: bool) =
+ b.circuitRelayClient = some(circuitRelayClient)
+
+proc withRelayShardedPeerManagement*(
+ b: var WakuConfBuilder, relayShardedPeerManagement: bool
+) =
+ b.relayShardedPeerManagement = some(relayShardedPeerManagement)
+
+proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) =
+ b.keepAlive = some(keepAlive)
+
+proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
+ b.p2pReliability = some(p2pReliability)
+
+proc withExtMultiAddrs*(builder: var WakuConfBuilder, extMultiAddrs: seq[string]) =
+ builder.extMultiAddrs = concat(builder.extMultiAddrs, extMultiAddrs)
+
+proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSizeBytes: uint64) =
+ builder.maxMessageSize = MaxMessageSize(kind: mmskInt, bytes: maxMessageSizeBytes)
+
+proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
+ builder.maxMessageSize = MaxMessageSize(kind: mmskStr, str: maxMessageSize)
+
+proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
+ builder.staticNodes = concat(builder.staticNodes, staticNodes)
+
+proc nodeKey(
+ builder: WakuConfBuilder, rng: ref HmacDrbgContext
+): Result[crypto.PrivateKey, string] =
+ if builder.nodeKey.isSome():
+ return ok(builder.nodeKey.get())
+ else:
+ warn "missing node key, generating new set"
+ let nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
+ error "Failed to generate key", error = error
+ return err("Failed to generate key: " & $error)
+ return ok(nodeKey)
+
+proc applyClusterConf(builder: var WakuConfBuilder) =
+ # Apply cluster conf, overrides most values passed individually
+ # If you want to tweak values, don't use clusterConf
+ if builder.clusterConf.isNone:
+ return
+ let clusterConf = builder.clusterConf.get()
+
+ if builder.clusterId.isSome():
+ warn "Cluster id was provided alongside a cluster conf",
+ used = clusterConf.clusterId, discarded = builder.clusterId.get()
+ builder.clusterId = some(clusterConf.clusterId)
+
+ # Apply relay parameters
+ if builder.relay.get(false) and clusterConf.rlnRelay:
+ if builder.rlnRelayConf.enabled.isSome():
+ warn "RLN Relay was provided alongside a cluster conf",
+ used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
+ builder.rlnRelayConf.withEnabled(true)
+
+ if builder.rlnRelayConf.ethContractAddress.get("") != "":
+ warn "RLN Relay ETH Contract Address was provided alongside a cluster conf",
+ used = clusterConf.rlnRelayEthContractAddress.string,
+ discarded = builder.rlnRelayConf.ethContractAddress.get().string
+ builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress)
+
+ if builder.rlnRelayConf.chainId.isSome():
+ warn "RLN Relay Chain Id was provided alongside a cluster conf",
+ used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
+ builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId)
+
+ if builder.rlnRelayConf.dynamic.isSome():
+ warn "RLN Relay Dynamic was provided alongside a cluster conf",
+ used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
+ builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic)
+
+ if builder.rlnRelayConf.epochSizeSec.isSome():
+ warn "RLN Epoch Size in Seconds was provided alongside a cluster conf",
+ used = clusterConf.rlnEpochSizeSec,
+ discarded = builder.rlnRelayConf.epochSizeSec
+ builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec)
+
+ if builder.rlnRelayConf.userMessageLimit.isSome():
+ warn "RLN Relay Dynamic was provided alongside a cluster conf",
+ used = clusterConf.rlnRelayUserMessageLimit,
+ discarded = builder.rlnRelayConf.userMessageLimit
+ builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit)
+ # End Apply relay parameters
+
+ case builder.maxMessageSize.kind
+ of mmskNone:
+ discard
+ of mmskStr, mmskInt:
+ warn "Max Message Size was provided alongside a cluster conf",
+ used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize
+ builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize))
+
+ if builder.numShardsInNetwork.isSome():
+ warn "Num Shards In Network was provided alongside a cluster conf",
+ used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork
+ builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork)
+
+ if clusterConf.discv5Discovery:
+ if builder.discv5Conf.enabled.isNone:
+ builder.discv5Conf.withEnabled(clusterConf.discv5Discovery)
+
+ if builder.discv5Conf.bootstrapNodes.len == 0 and
+ clusterConf.discv5BootstrapNodes.len > 0:
+ warn "Discv5 Boostrap nodes were provided alongside a cluster conf",
+ used = clusterConf.discv5BootstrapNodes,
+ discarded = builder.discv5Conf.bootstrapNodes
+ builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes)
+
+proc build*(
+ builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng()
+): Result[WakuConf, string] =
+ ## Return a WakuConf that contains all mandatory parameters
+ ## Applies some sane defaults that are applicable across any usage
+ ## of libwaku. It aims to be agnostic so it does not apply a
+ ## default when it is opinionated.
+
+ applyClusterConf(builder)
+
+ let relay =
+ if builder.relay.isSome():
+ builder.relay.get()
+ else:
+ warn "whether to mount relay is not specified, defaulting to not mounting"
+ false
+
+ let lightPush =
+ if builder.lightPush.isSome():
+ builder.lightPush.get()
+ else:
+ warn "whether to mount lightPush is not specified, defaulting to not mounting"
+ false
+
+ let peerExchange =
+ if builder.peerExchange.isSome():
+ builder.peerExchange.get()
+ else:
+ warn "whether to mount peerExchange is not specified, defaulting to not mounting"
+ false
+
+ let storeSync =
+ if builder.storeSync.isSome():
+ builder.storeSync.get()
+ else:
+ warn "whether to mount storeSync is not specified, defaulting to not mounting"
+ false
+
+ let rendezvous =
+ if builder.rendezvous.isSome():
+ builder.rendezvous.get()
+ else:
+ warn "whether to mount rendezvous is not specified, defaulting to not mounting"
+ false
+
+ let relayPeerExchange = builder.relayPeerExchange.get(false)
+
+ let nodeKey = ?nodeKey(builder, rng)
+
+ let clusterId =
+ if builder.clusterId.isNone():
+ # TODO: ClusterId should never be defaulted, instead, presets
+ # should be defined and used
+ warn("Cluster Id was not specified, defaulting to 0")
+ 0.uint16
+ else:
+ builder.clusterId.get()
+
+ let numShardsInNetwork =
+ if builder.numShardsInNetwork.isSome():
+ builder.numShardsInNetwork.get()
+ else:
+      warn "Number of shards in network not specified, defaulting to zero (improvement is WIP)"
+ 0
+
+ let shards =
+ if builder.shards.isSome():
+ builder.shards.get()
+ else:
+ warn "shards not specified, defaulting to all shards in network"
+ # TODO: conversion should not be needed
+ let upperShard: uint16 = uint16(numShardsInNetwork - 1)
+ toSeq(0.uint16 .. upperShard)
+
+ let protectedShards = builder.protectedShards.get(@[])
+
+ let maxMessageSizeBytes =
+ case builder.maxMessageSize.kind
+ of mmskInt:
+ builder.maxMessageSize.bytes
+ of mmskStr:
+ ?parseMsgSize(builder.maxMessageSize.str)
+ else:
+ warn "Max Message Size not specified, defaulting to 150KiB"
+ parseCorrectMsgSize("150KiB")
+
+ let contentTopics = builder.contentTopics.get(@[])
+
+ # Build sub-configs
+ let discv5Conf = builder.discv5Conf.build().valueOr:
+ return err("Discv5 Conf building failed: " & $error)
+
+ let dnsDiscoveryConf = builder.dnsDiscoveryConf.build().valueOr:
+ return err("DNS Discovery Conf building failed: " & $error)
+
+ let filterServiceConf = builder.filterServiceConf.build().valueOr:
+ return err("Filter Service Conf building failed: " & $error)
+
+ let metricsServerConf = builder.metricsServerConf.build().valueOr:
+ return err("Metrics Server Conf building failed: " & $error)
+
+ let restServerConf = builder.restServerConf.build().valueOr:
+ return err("REST Server Conf building failed: " & $error)
+
+ let rlnRelayConf = builder.rlnRelayConf.build().valueOr:
+ return err("RLN Relay Conf building failed: " & $error)
+
+ let storeServiceConf = builder.storeServiceConf.build().valueOr:
+ return err("Store Conf building failed: " & $error)
+
+ let webSocketConf = builder.webSocketConf.build().valueOr:
+ return err("WebSocket Conf building failed: " & $error)
+ # End - Build sub-configs
+
+ let logLevel =
+ if builder.logLevel.isSome():
+ builder.logLevel.get()
+ else:
+ warn "Log Level not specified, defaulting to INFO"
+ logging.LogLevel.INFO
+
+ let logFormat =
+ if builder.logFormat.isSome():
+ builder.logFormat.get()
+ else:
+ warn "Log Format not specified, defaulting to TEXT"
+ logging.LogFormat.TEXT
+
+ let natStrategy =
+ if builder.natStrategy.isSome():
+ builder.natStrategy.get()
+ else:
+ warn "Nat Strategy is not specified, defaulting to none"
+ "none"
+
+ let p2pTcpPort =
+ if builder.p2pTcpPort.isSome():
+ builder.p2pTcpPort.get()
+ else:
+ warn "P2P Listening TCP Port is not specified, listening on 60000"
+ 60000.Port
+
+ let p2pListenAddress =
+ if builder.p2pListenAddress.isSome():
+ builder.p2pListenAddress.get()
+ else:
+ warn "P2P listening address not specified, listening on 0.0.0.0"
+ (static parseIpAddress("0.0.0.0"))
+
+ let portsShift =
+ if builder.portsShift.isSome():
+ builder.portsShift.get()
+ else:
+ warn "Ports Shift is not specified, defaulting to 0"
+ 0.uint16
+
+ let dns4DomainName =
+ if builder.dns4DomainName.isSome():
+ let d = builder.dns4DomainName.get()
+ if d.string != "":
+ some(d)
+ else:
+ none(string)
+ else:
+ none(string)
+
+ var extMultiAddrs: seq[MultiAddress] = @[]
+ for s in builder.extMultiAddrs:
+ let m = MultiAddress.init(s).valueOr:
+ return err("Invalid multiaddress provided: " & s)
+ extMultiAddrs.add(m)
+
+ let extMultiAddrsOnly =
+ if builder.extMultiAddrsOnly.isSome():
+ builder.extMultiAddrsOnly.get()
+ else:
+ warn "Whether to only announce external multiaddresses is not specified, defaulting to false"
+ false
+
+ let dnsAddrs =
+ if builder.dnsAddrs.isSome():
+ builder.dnsAddrs.get()
+ else:
+ warn "Whether to resolve DNS multiaddresses was not specified, defaulting to false."
+ false
+
+ let dnsAddrsNameServers =
+ if builder.dnsAddrsNameServers.len != 0:
+ builder.dnsAddrsNameServers
+ else:
+ warn "DNS name servers IPs not provided, defaulting to Cloudflare's."
+ @[static parseIpAddress("1.1.1.1"), static parseIpAddress("1.0.0.1")]
+
+ let peerPersistence =
+ if builder.peerPersistence.isSome():
+ builder.peerPersistence.get()
+ else:
+ warn "Peer persistence not specified, defaulting to false"
+ false
+
+ let maxConnections =
+ if builder.maxConnections.isSome():
+ builder.maxConnections.get()
+ else:
+ warn "Max Connections was not specified, defaulting to 300"
+ 300
+
+ # TODO: Do the git version thing here
+ let agentString = builder.agentString.get("nwaku")
+
+ # TODO: use `DefaultColocationLimit`. the user of this value should
+ # probably be defining a config object
+ let colocationLimit = builder.colocationLimit.get(5)
+ let rateLimits = builder.rateLimits.get(newSeq[string](0))
+
+ # TODO: is there a strategy for experimental features? delete vs promote
+ let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
+
+ let wakuFlags = CapabilitiesBitfield.init(
+ lightpush = lightPush,
+ filter = filterServiceConf.isSome,
+ store = storeServiceConf.isSome,
+ relay = relay,
+ sync = storeServiceConf.isSome() and storeServiceConf.get().storeSyncConf.isSome,
+ )
+
+ let wakuConf = WakuConf(
+ # confs
+ storeServiceConf: storeServiceConf,
+ filterServiceConf: filterServiceConf,
+ discv5Conf: discv5Conf,
+ rlnRelayConf: rlnRelayConf,
+ metricsServerConf: metricsServerConf,
+ restServerConf: restServerConf,
+ dnsDiscoveryConf: dnsDiscoveryConf,
+ # end confs
+ nodeKey: nodeKey,
+ clusterId: clusterId,
+ numShardsInNetwork: numShardsInNetwork,
+ contentTopics: contentTopics,
+ shards: shards,
+ protectedShards: protectedShards,
+ relay: relay,
+ lightPush: lightPush,
+ peerExchange: peerExchange,
+ rendezvous: rendezvous,
+ remoteStoreNode: builder.remoteStoreNode,
+ remoteLightPushNode: builder.remoteLightPushNode,
+ remoteFilterNode: builder.remoteFilterNode,
+ remotePeerExchangeNode: builder.remotePeerExchangeNode,
+ relayPeerExchange: relayPeerExchange,
+ maxMessageSizeBytes: maxMessageSizeBytes,
+ logLevel: logLevel,
+ logFormat: logFormat,
+ # TODO: Separate builders
+ networkConf: NetworkConfig(
+ natStrategy: natStrategy,
+ p2pTcpPort: p2pTcpPort,
+ dns4DomainName: dns4DomainName,
+ p2pListenAddress: p2pListenAddress,
+ extMultiAddrs: extMultiAddrs,
+ extMultiAddrsOnly: extMultiAddrsOnly,
+ ),
+ portsShift: portsShift,
+ webSocketConf: webSocketConf,
+ dnsAddrs: dnsAddrs,
+ dnsAddrsNameServers: dnsAddrsNameServers,
+ peerPersistence: peerPersistence,
+ peerStoreCapacity: builder.peerStoreCapacity,
+ maxConnections: maxConnections,
+ agentString: agentString,
+ colocationLimit: colocationLimit,
+ maxRelayPeers: builder.maxRelayPeers,
+ relayServiceRatio: builder.relayServiceRatio.get("60:40"),
+ rateLimits: rateLimits,
+ circuitRelayClient: builder.circuitRelayClient.get(false),
+ keepAlive: builder.keepAlive.get(true),
+ staticNodes: builder.staticNodes,
+ relayShardedPeerManagement: relayShardedPeerManagement,
+ p2pReliability: builder.p2pReliability.get(false),
+ wakuFlags: wakuFlags,
+ )
+
+ ?wakuConf.validate()
+
+ return ok(wakuConf)
diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim
new file mode 100644
index 000000000..25ff6461d
--- /dev/null
+++ b/waku/factory/conf_builder/web_socket_conf_builder.nim
@@ -0,0 +1,70 @@
+import chronicles, std/[net, options], results
+import ../network_conf
+
+logScope:
+ topics = "waku conf builder websocket"
+
+##############################
+## WebSocket Config Builder ##
+##############################
+type WebSocketConfBuilder* = object
+ enabled*: Option[bool]
+ webSocketPort*: Option[Port]
+ secureEnabled*: Option[bool]
+ keyPath*: Option[string]
+ certPath*: Option[string]
+
+proc init*(T: type WebSocketConfBuilder): WebSocketConfBuilder =
+ WebSocketConfBuilder()
+
+proc withEnabled*(b: var WebSocketConfBuilder, enabled: bool) =
+ b.enabled = some(enabled)
+
+proc withSecureEnabled*(b: var WebSocketConfBuilder, secureEnabled: bool) =
+ b.secureEnabled = some(secureEnabled)
+ if b.secureEnabled.get():
+ b.enabled = some(true) # ws must be enabled to use wss
+
+proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: Port) =
+ b.webSocketPort = some(webSocketPort)
+
+proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: uint16) =
+ b.webSocketPort = some(Port(webSocketPort))
+
+proc withKeyPath*(b: var WebSocketConfBuilder, keyPath: string) =
+ b.keyPath = some(keyPath)
+
+proc withCertPath*(b: var WebSocketConfBuilder, certPath: string) =
+ b.certPath = some(certPath)
+
+proc build*(b: WebSocketConfBuilder): Result[Option[WebSocketConf], string] =
+ if not b.enabled.get(false):
+ return ok(none(WebSocketConf))
+
+ if b.webSocketPort.isNone():
+ return err("websocket.port is not specified")
+
+ if not b.secureEnabled.get(false):
+ return ok(
+ some(
+ WebSocketConf(
+          port: b.webSocketPort.get(), secureConf: none(WebSocketSecureConf)
+ )
+ )
+ )
+
+ if b.keyPath.get("") == "":
+ return err("WebSocketSecure enabled but key path is not specified")
+ if b.certPath.get("") == "":
+ return err("WebSocketSecure enabled but cert path is not specified")
+
+ return ok(
+ some(
+ WebSocketConf(
+ port: b.webSocketPort.get(),
+ secureConf: some(
+ WebSocketSecureConf(keyPath: b.keyPath.get(), certPath: b.certPath.get())
+ ),
+ )
+ )
+ )
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 512c3ffcf..ce0eec97f 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -1,6 +1,7 @@
import
- std/[strutils, strformat],
+ std/[strutils, strformat, sequtils],
results,
+ chronicles,
chronos,
regex,
confutils,
@@ -14,17 +15,26 @@ import
nimcrypto/utils,
secp256k1,
json
+
import
+ ./waku_conf,
+ ./conf_builder/conf_builder,
+ ./networks_config,
../common/confutils/envvar/defs as confEnvvarDefs,
../common/confutils/envvar/std/net as confEnvvarNet,
../common/logging,
../waku_enr,
../node/peer_manager,
- ../waku_core/topics/pubsub_topic
+ ../waku_core/topics/pubsub_topic,
+ ../../tools/rln_keystore_generator/rln_keystore_generator,
+ ../../tools/rln_db_inspector/rln_db_inspector
include ../waku_core/message/default_values
-export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
+export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet, ProtectedShard
+
+logScope:
+ topics = "waku external config"
# Git version in git describe format (defined at compile time)
const git_version* {.strdefine.} = "n/a"
@@ -33,10 +43,6 @@ type ConfResult*[T] = Result[T, string]
type EthRpcUrl* = distinct string
-type ProtectedShard* = object
- shard*: uint16
- key*: secp256k1.SkPublicKey
-
type StartUpCommand* = enum
noCommand # default, runs waku
generateRlnKeystore # generates a new RLN keystore
@@ -64,16 +70,17 @@ type WakuNodeConf* = object
.}: logging.LogFormat
rlnRelayCredPath* {.
- desc: "The path for peristing rln-relay credential",
+ desc: "The path for persisting rln-relay credential",
defaultValue: "",
name: "rln-relay-cred-path"
.}: string
- rlnRelayEthClientAddress* {.
- desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
- defaultValue: "http://localhost:8540/",
+ ethClientUrls* {.
+ desc:
+ "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
+ defaultValue: @[EthRpcUrl("http://localhost:8540/")],
name: "rln-relay-eth-client-address"
- .}: EthRpcUrl
+ .}: seq[EthRpcUrl]
rlnRelayEthContractAddress* {.
desc: "Address of membership contract on an Ethereum testnet.",
@@ -148,7 +155,7 @@ type WakuNodeConf* = object
## General node config
preset* {.
desc:
- "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1).",
+ "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.",
defaultValue: "",
name: "preset"
.}: string
@@ -196,7 +203,7 @@ type WakuNodeConf* = object
.}: seq[string]
extMultiAddrsOnly* {.
- desc: "Only announce external multiaddresses",
+ desc: "Only announce external multiaddresses setup with --ext-multiaddr",
defaultValue: false,
name: "ext-multiaddr-only"
.}: bool
@@ -300,31 +307,12 @@ hence would have reachability issues.""",
name: "rln-relay-dynamic"
.}: bool
- rlnRelayIdKey* {.
- desc: "Rln relay identity secret key as a Hex string",
- defaultValue: "",
- name: "rln-relay-id-key"
- .}: string
-
- rlnRelayIdCommitmentKey* {.
- desc: "Rln relay identity commitment key as a Hex string",
- defaultValue: "",
- name: "rln-relay-id-commitment-key"
- .}: string
-
rlnRelayTreePath* {.
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
defaultValue: "",
name: "rln-relay-tree-path"
.}: string
- rlnRelayBandwidthThreshold* {.
- desc:
- "Message rate in bytes/sec after which verification of proofs should happen.",
- defaultValue: 0, # to maintain backwards compatibility
- name: "rln-relay-bandwidth-threshold"
- .}: int
-
staticnodes* {.
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
name: "staticnode"
@@ -344,13 +332,6 @@ hence would have reachability issues.""",
name: "num-shards-in-network"
.}: uint32
- pubsubTopics* {.
- desc:
- "Deprecated. Default pubsub topic to subscribe to. Argument may be repeated.",
- defaultValue: @[],
- name: "pubsub-topic"
- .}: seq[string]
-
shards* {.
desc:
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
@@ -379,7 +360,7 @@ hence would have reachability issues.""",
.}: bool
legacyStore* {.
- desc: "Enable/disable waku store legacy mode",
+ desc: "Enable/disable support of Waku Store v2 as a service",
defaultValue: true,
name: "legacy-store"
.}: bool
@@ -439,28 +420,20 @@ hence would have reachability issues.""",
desc: "Interval between store sync attempts. In seconds.",
defaultValue: 300, # 5 minutes
name: "store-sync-interval"
- .}: int64
+ .}: uint32
storeSyncRange* {.
desc: "Amount of time to sync. In seconds.",
defaultValue: 3600, # 1 hours
name: "store-sync-range"
- .}: int64
+ .}: uint32
storeSyncRelayJitter* {.
hidden,
desc: "Time offset to account for message propagation jitter. In seconds.",
defaultValue: 20,
name: "store-sync-relay-jitter"
- .}: int64
-
- storeSyncMaxPayloadSize* {.
- hidden,
- desc:
- "Max size in bytes of the inner negentropy payload. Cannot be less than 5K, 0 is unlimited.",
- defaultValue: 0,
- name: "store-sync-max-payload-size"
- .}: int64
+ .}: uint32
## Filter config
filter* {.
@@ -478,7 +451,7 @@ hence would have reachability issues.""",
"Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.",
defaultValue: 300, # 5 minutes
name: "filter-subscription-timeout"
- .}: int64
+ .}: uint16
filterMaxPeersToServe* {.
desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.",
@@ -616,9 +589,9 @@ with the drawback of consuming some more bandwidth.""",
## Discovery v5 config
discv5Discovery* {.
desc: "Enable discovering nodes via Node Discovery v5.",
- defaultValue: false,
+ defaultValue: none(bool),
name: "discv5-discovery"
- .}: bool
+ .}: Option[bool]
discv5UdpPort* {.
desc: "Listening UDP port for Node Discovery v5.",
@@ -796,8 +769,7 @@ proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
return @[]
proc defaultListenAddress*(): IpAddress =
- # TODO: How should we select between IPv4 and IPv6
- # Maybe there should be a config option for this.
+ # TODO: Should probably listen on both ipv4 and ipv6 by default.
(static parseIpAddress("0.0.0.0"))
proc defaultColocationLimit*(): int =
@@ -906,3 +878,188 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] =
return ok(conf)
except CatchableError:
return err("exception in defaultWakuNodeConf: " & getCurrentExceptionMsg())
+
+proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
+ RlnKeystoreGeneratorConf(
+ execute: n.execute,
+ chainId: n.rlnRelayChainId,
+ ethClientUrls: n.ethClientUrls.mapIt(string(it)),
+ ethContractAddress: n.rlnRelayEthContractAddress,
+ userMessageLimit: n.rlnRelayUserMessageLimit,
+ ethPrivateKey: n.rlnRelayEthPrivateKey,
+ credPath: n.rlnRelayCredPath,
+ credPassword: n.rlnRelayCredPassword,
+ )
+
+proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf =
+ return InspectRlnDbConf(treePath: n.treePath)
+
+proc toClusterConf(
+ preset: string, clusterId: Option[uint16]
+): ConfResult[Option[ClusterConf]] =
+ var lcPreset = toLowerAscii(preset)
+ if clusterId.isSome() and clusterId.get() == 1:
+ warn(
+ "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
+ )
+ lcPreset = "twn"
+
+ case lcPreset
+ of "":
+ ok(none(ClusterConf))
+ of "twn":
+ ok(some(ClusterConf.TheWakuNetworkConf()))
+ else:
+ err("Invalid --preset value passed: " & lcPreset)
+
+proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
+ var b = WakuConfBuilder.init()
+
+ b.withLogLevel(n.logLevel)
+ b.withLogFormat(n.logFormat)
+
+ b.rlnRelayConf.withEnabled(n.rlnRelay)
+ if n.rlnRelayCredPath != "":
+ b.rlnRelayConf.withCredPath(n.rlnRelayCredPath)
+ if n.rlnRelayCredPassword != "":
+ b.rlnRelayConf.withCredPassword(n.rlnRelayCredPassword)
+ if n.ethClientUrls.len > 0:
+ b.rlnRelayConf.withEthClientUrls(n.ethClientUrls.mapIt(string(it)))
+ if n.rlnRelayEthContractAddress != "":
+ b.rlnRelayConf.withEthContractAddress(n.rlnRelayEthContractAddress)
+
+ if n.rlnRelayChainId != 0:
+ b.rlnRelayConf.withChainId(n.rlnRelayChainId)
+ b.rlnRelayConf.withUserMessageLimit(n.rlnRelayUserMessageLimit)
+ b.rlnRelayConf.withEpochSizeSec(n.rlnEpochSizeSec)
+
+ if n.rlnRelayCredIndex.isSome():
+ b.rlnRelayConf.withCredIndex(n.rlnRelayCredIndex.get())
+ b.rlnRelayConf.withDynamic(n.rlnRelayDynamic)
+
+ b.rlnRelayConf.withTreePath(n.rlnRelayTreePath)
+
+ if n.maxMessageSize != "":
+ b.withMaxMessageSize(n.maxMessageSize)
+
+ b.withProtectedShards(n.protectedShards)
+ b.withClusterId(n.clusterId)
+
+ let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr:
+ return err("Error determining cluster from preset: " & $error)
+
+ if clusterConf.isSome():
+ b.withClusterConf(clusterConf.get())
+
+ b.withAgentString(n.agentString)
+
+ if n.nodeKey.isSome():
+ b.withNodeKey(n.nodeKey.get())
+
+ b.withP2pListenAddress(n.listenAddress)
+ b.withP2pTcpPort(n.tcpPort)
+ b.withPortsShift(n.portsShift)
+ b.withNatStrategy(n.nat)
+ b.withExtMultiAddrs(n.extMultiAddrs)
+ b.withExtMultiAddrsOnly(n.extMultiAddrsOnly)
+ b.withMaxConnections(n.maxConnections)
+
+ if n.maxRelayPeers.isSome():
+ b.withMaxRelayPeers(n.maxRelayPeers.get())
+
+ if n.relayServiceRatio != "":
+ b.withRelayServiceRatio(n.relayServiceRatio)
+ b.withColocationLimit(n.colocationLimit)
+
+ if n.peerStoreCapacity.isSome:
+ b.withPeerStoreCapacity(n.peerStoreCapacity.get())
+
+ b.withPeerPersistence(n.peerPersistence)
+ b.withDnsAddrs(n.dnsAddrs)
+ b.withDnsAddrsNameServers(n.dnsAddrsNameServers)
+ b.withDns4DomainName(n.dns4DomainName)
+ b.withCircuitRelayClient(n.isRelayClient)
+ b.withRelay(n.relay)
+ b.withRelayPeerExchange(n.relayPeerExchange)
+ b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
+ b.withStaticNodes(n.staticNodes)
+ b.withKeepAlive(n.keepAlive)
+
+ if n.numShardsInNetwork != 0:
+ b.withNumShardsInNetwork(n.numShardsInNetwork)
+
+ b.withShards(n.shards)
+ b.withContentTopics(n.contentTopics)
+
+ b.storeServiceConf.withEnabled(n.store)
+ b.storeServiceConf.withSupportV2(n.legacyStore)
+ b.storeServiceConf.withRetentionPolicy(n.storeMessageRetentionPolicy)
+ b.storeServiceConf.withDbUrl(n.storeMessageDbUrl)
+ b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum)
+ b.storeServiceConf.withDbMigration(n.storeMessageDbMigration)
+ b.storeServiceConf.withMaxNumDbConnections(n.storeMaxNumDbConnections)
+ b.storeServiceConf.withResume(n.storeResume)
+
+ # TODO: can we just use `Option` on the CLI?
+ if n.storenode != "":
+ b.withRemoteStoreNode(n.storenode)
+ if n.filternode != "":
+ b.withRemoteFilterNode(n.filternode)
+ if n.lightpushnode != "":
+ b.withRemoteLightPushNode(n.lightpushnode)
+ if n.peerExchangeNode != "":
+ b.withRemotePeerExchangeNode(n.peerExchangeNode)
+
+ b.storeServiceConf.storeSyncConf.withEnabled(n.storeSync)
+ b.storeServiceConf.storeSyncConf.withIntervalSec(n.storeSyncInterval)
+ b.storeServiceConf.storeSyncConf.withRangeSec(n.storeSyncRange)
+ b.storeServiceConf.storeSyncConf.withRelayJitterSec(n.storeSyncRelayJitter)
+
+ b.filterServiceConf.withEnabled(n.filter)
+ b.filterServiceConf.withSubscriptionTimeout(n.filterSubscriptionTimeout)
+ b.filterServiceConf.withMaxPeersToServe(n.filterMaxPeersToServe)
+ b.filterServiceConf.withMaxCriteria(n.filterMaxCriteria)
+
+ b.withLightPush(n.lightpush)
+ b.withP2pReliability(n.reliabilityEnabled)
+
+ b.restServerConf.withEnabled(n.rest)
+ b.restServerConf.withListenAddress(n.restAddress)
+ b.restServerConf.withPort(n.restPort)
+ b.restServerConf.withRelayCacheCapacity(n.restRelayCacheCapacity)
+ b.restServerConf.withAdmin(n.restAdmin)
+ b.restServerConf.withAllowOrigin(n.restAllowOrigin)
+
+ b.metricsServerConf.withEnabled(n.metricsServer)
+ b.metricsServerConf.withHttpAddress(n.metricsServerAddress)
+ b.metricsServerConf.withHttpPort(n.metricsServerPort)
+ b.metricsServerConf.withLogging(n.metricsLogging)
+
+ b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
+ b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
+ b.dnsDiscoveryConf.withNameServers(n.dnsDiscoveryNameServers)
+
+ if n.discv5Discovery.isSome():
+ b.discv5Conf.withEnabled(n.discv5Discovery.get())
+
+ b.discv5Conf.withUdpPort(n.discv5UdpPort)
+ b.discv5Conf.withBootstrapNodes(n.discv5BootstrapNodes)
+ b.discv5Conf.withEnrAutoUpdate(n.discv5EnrAutoUpdate)
+ b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit)
+ b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit)
+ b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop)
+ b.discv5Conf.withDiscv5Only(n.discv5Only)
+
+ b.withPeerExchange(n.peerExchange)
+
+ b.withRendezvous(n.rendezvous)
+
+ b.webSocketConf.withEnabled(n.websocketSupport)
+ b.webSocketConf.withWebSocketPort(n.websocketPort)
+ b.webSocketConf.withSecureEnabled(n.websocketSecureSupport)
+ b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
+ b.webSocketConf.withCertPath(n.websocketSecureCertPath)
+
+ b.withRateLimits(n.rateLimits)
+
+ return b.build()
diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim
index 08f11f1c5..72af28340 100644
--- a/waku/factory/internal_config.nim
+++ b/waku/factory/internal_config.nim
@@ -4,21 +4,20 @@ import
libp2p/crypto/crypto,
libp2p/multiaddress,
libp2p/nameresolving/dnsresolver,
- std/[options, sequtils, strutils, net],
+ std/[options, sequtils, net],
results
import
- ./external_config,
../common/utils/nat,
- ../node/config,
- ../waku_enr/capabilities,
+ ../node/net_config,
../waku_enr,
../waku_core,
- ./networks_config
+ ./waku_conf,
+ ./network_conf
proc enrConfiguration*(
- conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey
+ conf: WakuConf, netConfig: NetConfig
): Result[enr.Record, string] =
- var enrBuilder = EnrBuilder.init(key)
+ var enrBuilder = EnrBuilder.init(conf.nodeKey)
enrBuilder.withIpAddressAndPorts(
netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort
@@ -44,19 +43,12 @@ proc enrConfiguration*(
return ok(record)
-proc validateExtMultiAddrs*(vals: seq[string]): Result[seq[MultiAddress], string] =
- var multiaddrs: seq[MultiAddress]
- for val in vals:
- let multiaddr = ?MultiAddress.init(val)
- multiaddrs.add(multiaddr)
- return ok(multiaddrs)
-
proc dnsResolve*(
- domain: string, conf: WakuNodeConf
+ domain: string, dnsAddrsNameServers: seq[IpAddress]
): Future[Result[string, string]] {.async.} =
# Use conf's DNS servers
var nameServers: seq[TransportAddress]
- for ip in conf.dnsAddrsNameServers:
+ for ip in dnsAddrsNameServers:
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
let dnsResolver = DnsResolver.new(nameServers)
@@ -69,14 +61,24 @@ proc dnsResolve*(
else:
return err("Could not resolve IP from DNS: empty response")
-proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResult =
+# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init
+proc networkConfiguration*(
+ clusterId: uint16,
+ conf: NetworkConfig,
+ discv5Conf: Option[Discv5Conf],
+ webSocketConf: Option[WebSocketConf],
+ wakuFlags: CapabilitiesBitfield,
+ dnsAddrsNameServers: seq[IpAddress],
+ portsShift: uint16,
+ clientId: string,
+): NetConfigResult =
## `udpPort` is only supplied to satisfy underlying APIs but is not
## actually a supported transport for libp2p traffic.
let natRes = setupNat(
- conf.nat,
+ conf.natStrategy.string,
clientId,
- Port(uint16(conf.tcpPort) + conf.portsShift),
- Port(uint16(conf.tcpPort) + conf.portsShift),
+ Port(uint16(conf.p2pTcpPort) + portsShift),
+ Port(uint16(conf.p2pTcpPort) + portsShift),
)
if natRes.isErr():
return err("failed to setup NAT: " & $natRes.error)
@@ -84,15 +86,9 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
var (extIp, extTcpPort, _) = natRes.get()
let
- dns4DomainName =
- if conf.dns4DomainName != "":
- some(conf.dns4DomainName)
- else:
- none(string)
-
discv5UdpPort =
- if conf.discv5Discovery:
- some(Port(uint16(conf.discv5UdpPort) + conf.portsShift))
+ if discv5Conf.isSome():
+ some(Port(uint16(discv5Conf.get().udpPort) + portsShift))
else:
none(Port)
@@ -101,34 +97,15 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
## extPort as well. The following heuristic assumes that, in absence of
## manual config, the external port is the same as the bind port.
extPort =
- if (extIp.isSome() or dns4DomainName.isSome()) and extTcpPort.isNone():
- some(Port(uint16(conf.tcpPort) + conf.portsShift))
+ if (extIp.isSome() or conf.dns4DomainName.isSome()) and extTcpPort.isNone():
+ some(Port(uint16(conf.p2pTcpPort) + portsShift))
else:
extTcpPort
- extMultiAddrs =
- if (conf.extMultiAddrs.len > 0):
- let extMultiAddrsValidationRes = validateExtMultiAddrs(conf.extMultiAddrs)
- if extMultiAddrsValidationRes.isErr():
- return
- err("invalid external multiaddress: " & $extMultiAddrsValidationRes.error)
- else:
- extMultiAddrsValidationRes.get()
- else:
- @[]
-
- wakuFlags = CapabilitiesBitfield.init(
- lightpush = conf.lightpush,
- filter = conf.filter,
- store = conf.store,
- relay = conf.relay,
- sync = conf.storeSync,
- )
-
# Resolve and use DNS domain IP
- if dns4DomainName.isSome() and extIp.isNone():
+ if conf.dns4DomainName.isSome() and extIp.isNone():
try:
- let dnsRes = waitFor dnsResolve(conf.dns4DomainName, conf)
+ let dnsRes = waitFor dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)
if dnsRes.isErr():
return err($dnsRes.error) # Pass error down the stack
@@ -138,91 +115,38 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
return
err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg())
+ let (wsEnabled, wsBindPort, wssEnabled) =
+ if webSocketConf.isSome:
+ let wsConf = webSocketConf.get()
+ (true, some(Port(wsConf.port.uint16 + portsShift)), wsConf.secureConf.isSome)
+ else:
+ (false, none(Port), false)
+
# Wrap in none because NetConfig does not have a default constructor
# TODO: We could change bindIp in NetConfig to be something less restrictive
# than IpAddress, which doesn't allow default construction
let netConfigRes = NetConfig.init(
- clusterId = conf.clusterId,
- bindIp = conf.listenAddress,
- bindPort = Port(uint16(conf.tcpPort) + conf.portsShift),
+ clusterId = clusterId,
+ bindIp = conf.p2pListenAddress,
+ bindPort = Port(uint16(conf.p2pTcpPort) + portsShift),
extIp = extIp,
extPort = extPort,
- extMultiAddrs = extMultiAddrs,
+ extMultiAddrs = conf.extMultiAddrs,
extMultiAddrsOnly = conf.extMultiAddrsOnly,
- wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
- wsEnabled = conf.websocketSupport,
- wssEnabled = conf.websocketSecureSupport,
- dns4DomainName = dns4DomainName,
+ wsBindPort = wsBindPort,
+ wsEnabled = wsEnabled,
+ wssEnabled = wssEnabled,
+ dns4DomainName = conf.dns4DomainName,
discv5UdpPort = discv5UdpPort,
wakuFlags = some(wakuFlags),
)
return netConfigRes
-proc applyPresetConfiguration*(srcConf: WakuNodeConf): Result[WakuNodeConf, string] =
- var resConf = srcConf
-
- if resConf.clusterId == 1:
- warn(
- "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
- )
- resConf.preset = "twn"
-
- case toLowerAscii(resConf.preset)
- of "twn":
- let twnClusterConf = ClusterConf.TheWakuNetworkConf()
-
- # Override configuration
- resConf.maxMessageSize = twnClusterConf.maxMessageSize
- resConf.clusterId = twnClusterConf.clusterId
- resConf.rlnRelay = twnClusterConf.rlnRelay
- resConf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
- resConf.rlnRelayChainId = twnClusterConf.rlnRelayChainId
- resConf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
- resConf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
- resConf.discv5Discovery = twnClusterConf.discv5Discovery
- resConf.discv5BootstrapNodes =
- resConf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
- resConf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
- resConf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
- resConf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
-
- if resConf.relay:
- resConf.rlnRelay = twnClusterConf.rlnRelay
- else:
- discard
-
- return ok(resConf)
-
# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
-proc getNumShardsInNetwork*(conf: WakuNodeConf): uint32 =
+proc getNumShardsInNetwork*(conf: WakuConf): uint32 =
if conf.numShardsInNetwork != 0:
return conf.numShardsInNetwork
# If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec
# https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
return uint32(MaxShardIndex + 1)
-
-proc validateShards*(conf: WakuNodeConf): Result[void, string] =
- let numShardsInNetwork = getNumShardsInNetwork(conf)
-
- for shard in conf.shards:
- if shard >= numShardsInNetwork:
- let msg =
- "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
- $numShardsInNetwork # fmt doesn't work
- error "validateShards failed", error = msg
- return err(msg)
-
- return ok()
-
-proc getNodeKey*(
- conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng()
-): Result[PrivateKey, string] =
- if conf.nodekey.isSome():
- return ok(conf.nodekey.get())
-
- warn "missing node key, generating new set"
- let key = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
- error "Failed to generate key", error = error
- return err("Failed to generate key: " & $error)
- return ok(key)
diff --git a/waku/factory/network_conf.nim b/waku/factory/network_conf.nim
new file mode 100644
index 000000000..c5179e53a
--- /dev/null
+++ b/waku/factory/network_conf.nim
@@ -0,0 +1,34 @@
+import std/[net, options, strutils]
+import libp2p/multiaddress
+
+type WebSocketSecureConf* {.requiresInit.} = object
+ keyPath*: string
+ certPath*: string
+
+type WebSocketConf* = object
+ port*: Port
+ secureConf*: Option[WebSocketSecureConf]
+
+type NetworkConf* = object
+ natStrategy*: string # TODO: make enum
+ p2pTcpPort*: Port
+ dns4DomainName*: Option[string]
+ p2pListenAddress*: IpAddress
+ extMultiAddrs*: seq[MultiAddress]
+ extMultiAddrsOnly*: bool
+ webSocketConf*: Option[WebSocketConf]
+
+proc validateNoEmptyStrings(networkConf: NetworkConf): Result[void, string] =
+ if networkConf.dns4DomainName.isSome() and
+ isEmptyOrWhiteSpace(networkConf.dns4DomainName.get().string):
+ return err("dns4DomainName is an empty string, set it to none(string) instead")
+
+ if networkConf.webSocketConf.isSome() and
+ networkConf.webSocketConf.get().secureConf.isSome():
+ let secureConf = networkConf.webSocketConf.get().secureConf.get()
+ if isEmptyOrWhiteSpace(secureConf.keyPath):
+ return err("websocket.secureConf.keyPath is an empty string")
+ if isEmptyOrWhiteSpace(secureConf.certPath):
+ return err("websocket.secureConf.certPath is an empty string")
+
+ return ok()
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index 8dfefbf34..aceada3fe 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -1,15 +1,17 @@
{.push raises: [].}
+# TODO: Rename this type to match file name
+
type ClusterConf* = object
- maxMessageSize*: string
+ maxMessageSize*: string # TODO: static convert to a uint64
clusterId*: uint16
rlnRelay*: bool
rlnRelayEthContractAddress*: string
rlnRelayChainId*: uint
rlnRelayDynamic*: bool
- rlnRelayBandwidthThreshold*: int
rlnEpochSizeSec*: uint64
rlnRelayUserMessageLimit*: uint64
+ # TODO: should be uint16 like the `shards` parameter
numShardsInNetwork*: uint32
discv5Discovery*: bool
discv5BootstrapNodes*: seq[string]
@@ -22,15 +24,13 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
maxMessageSize: "150KiB",
clusterId: 1,
rlnRelay: true,
- rlnRelayEthContractAddress: "0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3",
+ rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
rlnRelayDynamic: true,
rlnRelayChainId: 11155111,
- rlnRelayBandwidthThreshold: 0,
rlnEpochSizeSec: 600,
rlnRelayUserMessageLimit: 100,
numShardsInNetwork: 8,
discv5Discovery: true,
- # TODO: Why is this part of the conf? eg an edge node would not have this
discv5BootstrapNodes:
@[
"enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw",
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 3142ff766..7df5c2567 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -10,7 +10,7 @@ import
import
./internal_config,
- ./external_config,
+ ./waku_conf,
./builder,
./validator_signed,
../waku_enr/sharding,
@@ -35,7 +35,6 @@ import
../node/peer_manager/peer_store/waku_peer_storage,
../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
../waku_lightpush_legacy/common,
- ../common/utils/parse_size_units,
../common/rate_limit/setting,
../common/databases/dburl
@@ -56,10 +55,9 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] =
## Init waku node instance
proc initNode(
- conf: WakuNodeConf,
+ conf: WakuConf,
netConfig: NetConfig,
rng: ref HmacDrbgContext,
- nodeKey: crypto.PrivateKey,
record: enr.Record,
peerStore: Option[WakuPeerStorage],
relay: Relay,
@@ -86,17 +84,24 @@ proc initNode(
else:
peerStore.get()
+ let (secureKey, secureCert) =
+ if conf.webSocketConf.isSome() and conf.webSocketConf.get().secureConf.isSome():
+ let wssConf = conf.webSocketConf.get().secureConf.get()
+ (some(wssConf.keyPath), some(wssConf.certPath))
+ else:
+ (none(string), none(string))
+
# Build waku node instance
var builder = WakuNodeBuilder.init()
builder.withRng(rng)
- builder.withNodeKey(nodekey)
+ builder.withNodeKey(conf.nodeKey)
builder.withRecord(record)
builder.withNetworkConfiguration(netConfig)
builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity)
builder.withSwitchConfiguration(
maxConnections = some(conf.maxConnections.int),
- secureKey = some(conf.websocketSecureKeyPath),
- secureCert = some(conf.websocketSecureCertPath),
+ secureKey = secureKey,
+ secureCert = secureCert,
nameResolver = dnsResolver,
sendSignedPeerRecord = conf.relayPeerExchange,
# We send our own signed peer record when peer exchange enabled
@@ -148,13 +153,13 @@ proc getAutoshards*(
return ok(autoshards)
proc setupProtocols(
- node: WakuNode, conf: WakuNodeConf, nodeKey: crypto.PrivateKey
+ node: WakuNode, conf: WakuConf
): Future[Result[void, string]] {.async.} =
## Setup configured protocols on an existing Waku v2 node.
## Optionally include persistent message storage.
## No protocols are started yet.
- if conf.discv5Only:
+ if conf.discv5Conf.isSome() and conf.discv5Conf.get().discv5Only:
notice "Running node only with Discv5, not mounting additional protocols"
return ok()
@@ -167,11 +172,12 @@ proc setupProtocols(
error "Unrecoverable error occurred", error = msg
quit(QuitFailure)
- if conf.store:
- if conf.legacyStore:
+ if conf.storeServiceConf.isSome():
+ let storeServiceConf = conf.storeServiceConf.get()
+ if storeServiceConf.supportV2:
let archiveDriverRes = waitFor legacy_driver.ArchiveDriver.new(
- conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration,
- conf.storeMaxNumDbConnections, onFatalErrorAction,
+ storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration,
+ storeServiceConf.maxNumDbConnections, onFatalErrorAction,
)
if archiveDriverRes.isErr():
return err("failed to setup legacy archive driver: " & archiveDriverRes.error)
@@ -191,26 +197,26 @@ proc setupProtocols(
## So for now, we need to make sure that when legacy store is enabled and we use sqlite
## that we migrate our db according to legacy store's schema to have the extra field
- let engineRes = dburl.getDbEngine(conf.storeMessageDbUrl)
+ let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl)
if engineRes.isErr():
return err("error getting db engine in setupProtocols: " & engineRes.error)
let engine = engineRes.get()
let migrate =
- if engine == "sqlite" and conf.legacyStore:
+ if engine == "sqlite" and storeServiceConf.supportV2:
false
else:
- conf.storeMessageDbMigration
+ storeServiceConf.dbMigration
let archiveDriverRes = waitFor driver.ArchiveDriver.new(
- conf.storeMessageDbUrl, conf.storeMessageDbVacuum, migrate,
- conf.storeMaxNumDbConnections, onFatalErrorAction,
+ storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
+ storeServiceConf.maxNumDbConnections, onFatalErrorAction,
)
if archiveDriverRes.isErr():
return err("failed to setup archive driver: " & archiveDriverRes.error)
- let retPolicyRes = policy.RetentionPolicy.new(conf.storeMessageRetentionPolicy)
+ let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy)
if retPolicyRes.isErr():
return err("failed to create retention policy: " & retPolicyRes.error)
@@ -218,7 +224,7 @@ proc setupProtocols(
if mountArcRes.isErr():
return err("failed to mount waku archive protocol: " & mountArcRes.error)
- if conf.legacyStore:
+ if storeServiceConf.supportV2:
# Store legacy setup
try:
await mountLegacyStore(node, node.rateLimitSettings.getSetting(STOREV2))
@@ -232,17 +238,28 @@ proc setupProtocols(
except CatchableError:
return err("failed to mount waku store protocol: " & getCurrentExceptionMsg())
+ if storeServiceConf.storeSyncConf.isSome():
+ let confStoreSync = storeServiceConf.storeSyncConf.get()
+
+ (
+ await node.mountStoreSync(
+ confStoreSync.rangeSec, confStoreSync.intervalSec,
+ confStoreSync.relayJitterSec,
+ )
+ ).isOkOr:
+ return err("failed to mount waku store sync protocol: " & $error)
+
mountStoreClient(node)
- if conf.storenode != "":
- let storeNode = parsePeerInfo(conf.storenode)
+ if conf.remoteStoreNode.isSome():
+ let storeNode = parsePeerInfo(conf.remoteStoreNode.get())
if storeNode.isOk():
node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec)
else:
return err("failed to set node waku store peer: " & storeNode.error)
mountLegacyStoreClient(node)
- if conf.storenode != "":
- let storeNode = parsePeerInfo(conf.storenode)
+ if conf.remoteStoreNode.isSome():
+ let storeNode = parsePeerInfo(conf.remoteStoreNode.get())
if storeNode.isOk():
node.peerManager.addServicePeer(
storeNode.value, legacy_common.WakuLegacyStoreCodec
@@ -250,7 +267,7 @@ proc setupProtocols(
else:
return err("failed to set node waku legacy store peer: " & storeNode.error)
- if conf.store and conf.storeResume:
+ if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
node.setupStoreResume()
# If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork
@@ -296,17 +313,17 @@ proc setupProtocols(
let shards = confShards & autoShards
if conf.relay:
- let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
- return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)
+ debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes
- debug "Setting max message size", num_bytes = parsedMaxMsgSize
-
- try:
+ (
await mountRelay(
- node, shards, peerExchangeHandler = peerExchangeHandler, int(parsedMaxMsgSize)
+ node,
+ shards,
+ peerExchangeHandler = peerExchangeHandler,
+ int(conf.maxMessageSizeBytes),
)
- except CatchableError:
- return err("failed to mount waku relay protocol: " & getCurrentExceptionMsg())
+ ).isOkOr:
+ return err("failed to mount waku relay protocol: " & $error)
# Add validation keys to protected topics
var subscribedProtectedShards: seq[ProtectedShard]
@@ -330,18 +347,18 @@ proc setupProtocols(
except CatchableError:
return err("failed to mount libp2p ping protocol: " & getCurrentExceptionMsg())
- if conf.rlnRelay:
+ if conf.rlnRelayConf.isSome():
+ let rlnRelayConf = conf.rlnRelayConf.get()
let rlnConf = WakuRlnConfig(
- rlnRelayDynamic: conf.rlnRelayDynamic,
- rlnRelayCredIndex: conf.rlnRelayCredIndex,
- rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
- rlnRelayChainId: conf.rlnRelayChainId,
- rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
- rlnRelayCredPath: conf.rlnRelayCredPath,
- rlnRelayCredPassword: conf.rlnRelayCredPassword,
- rlnRelayTreePath: conf.rlnRelayTreePath,
- rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
- rlnEpochSizeSec: conf.rlnEpochSizeSec,
+ dynamic: rlnRelayConf.dynamic,
+ credIndex: rlnRelayConf.credIndex,
+ ethContractAddress: rlnRelayConf.ethContractAddress,
+ chainId: rlnRelayConf.chainId,
+ ethClientUrls: rlnRelayConf.ethClientUrls,
+ creds: rlnRelayConf.creds,
+ treePath: rlnRelayConf.treePath,
+ userMessageLimit: rlnRelayConf.userMessageLimit,
+ epochSizeSec: rlnRelayConf.epochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
@@ -351,7 +368,7 @@ proc setupProtocols(
return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg())
# NOTE Must be mounted after relay
- if conf.lightpush:
+ if conf.lightPush:
try:
await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
@@ -360,8 +377,8 @@ proc setupProtocols(
mountLightPushClient(node)
mountLegacyLightPushClient(node)
- if conf.lightpushnode != "":
- let lightPushNode = parsePeerInfo(conf.lightpushnode)
+ if conf.remoteLightPushNode.isSome():
+ let lightPushNode = parsePeerInfo(conf.remoteLightPushNode.get())
if lightPushNode.isOk():
node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec)
node.peerManager.addServicePeer(lightPushNode.value, WakuLegacyLightPushCodec)
@@ -369,21 +386,22 @@ proc setupProtocols(
return err("failed to set node waku lightpush peer: " & lightPushNode.error)
# Filter setup. NOTE Must be mounted after relay
- if conf.filter:
+ if conf.filterServiceConf.isSome():
+ let confFilter = conf.filterServiceConf.get()
try:
await mountFilter(
node,
- subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout),
- maxFilterPeers = conf.filterMaxPeersToServe,
- maxFilterCriteriaPerPeer = conf.filterMaxCriteria,
+ subscriptionTimeout = chronos.seconds(confFilter.subscriptionTimeout),
+ maxFilterPeers = confFilter.maxPeersToServe,
+ maxFilterCriteriaPerPeer = confFilter.maxCriteria,
rateLimitSetting = node.rateLimitSettings.getSetting(FILTER),
)
except CatchableError:
return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg())
await node.mountFilterClient()
- if conf.filternode != "":
- let filterNode = parsePeerInfo(conf.filternode)
+ if conf.remoteFilterNode.isSome():
+ let filterNode = parsePeerInfo(conf.remoteFilterNode.get())
if filterNode.isOk():
try:
node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec)
@@ -394,14 +412,6 @@ proc setupProtocols(
else:
return err("failed to set node waku filter peer: " & filterNode.error)
- if conf.storeSync:
- (
- await node.mountStoreSync(
- conf.storeSyncRange, conf.storeSyncInterval, conf.storeSyncRelayJitter
- )
- ).isOkOr:
- return err("failed to mount waku store sync protocol: " & $error)
-
# waku peer exchange setup
if conf.peerExchange:
try:
@@ -412,8 +422,8 @@ proc setupProtocols(
return
err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg())
- if conf.peerExchangeNode != "":
- let peerExchangeNode = parsePeerInfo(conf.peerExchangeNode)
+ if conf.remotePeerExchangeNode.isSome():
+ let peerExchangeNode = parsePeerInfo(conf.remotePeerExchangeNode.get())
if peerExchangeNode.isOk():
node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec)
else:
@@ -425,7 +435,7 @@ proc setupProtocols(
## Start node
proc startNode*(
- node: WakuNode, conf: WakuNodeConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]
+ node: WakuNode, conf: WakuConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]
): Future[Result[void, string]] {.async: (raises: []).} =
## Start a configured node and all mounted protocols.
## Connect to static nodes and start
@@ -438,9 +448,9 @@ proc startNode*(
return err("failed to start waku node: " & getCurrentExceptionMsg())
# Connect to configured static nodes
- if conf.staticnodes.len > 0:
+ if conf.staticNodes.len > 0:
try:
- await connectToNodes(node, conf.staticnodes, "static")
+ await connectToNodes(node, conf.staticNodes, "static")
except CatchableError:
return err("failed to connect to static nodes: " & getCurrentExceptionMsg())
@@ -453,16 +463,18 @@ proc startNode*(
err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg())
# retrieve px peers and add the to the peer store
- if conf.peerExchangeNode != "":
+ if conf.remotePeerExchangeNode.isSome():
var desiredOutDegree = DefaultPXNumPeersReq
if not node.wakuRelay.isNil() and node.wakuRelay.parameters.d.uint64() > 0:
desiredOutDegree = node.wakuRelay.parameters.d.uint64()
(await node.fetchPeerExchangePeers(desiredOutDegree)).isOkOr:
error "error while fetching peers from peer exchange", error = error
+ # TODO: behavior described by comment is undesired. PX as client should be used in tandem with discv5.
+ #
# Use px to periodically get peers if discv5 is disabled, as discv5 nodes have their own
# periodic loop to find peers and px returned peers actually come from discv5
- if conf.peerExchange and not conf.discv5Discovery:
+ if conf.peerExchange and not conf.discv5Conf.isSome():
node.startPeerExchangeLoop()
# Start keepalive, if enabled
@@ -476,27 +488,21 @@ proc startNode*(
return ok()
proc setupNode*(
- conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
+ wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
): Result[WakuNode, string] =
- # Use provided key only if corresponding rng is also provided
- let key =
- if conf.nodeKey.isSome():
- conf.nodeKey.get()
- else:
- warn "missing key, generating new"
- crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
- error "Failed to generate key", error = error
- return err("Failed to generate key: " & $error)
-
- let netConfig = networkConfiguration(conf, clientId).valueOr:
+ let netConfig = networkConfiguration(
+ wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf,
+ wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
+ wakuConf.portsShift, clientId,
+ ).valueOr:
error "failed to create internal config", error = error
return err("failed to create internal config: " & error)
- let record = enrConfiguration(conf, netConfig, key).valueOr:
+ let record = enrConfiguration(wakuConf, netConfig).valueOr:
error "failed to create record", error = error
return err("failed to create record: " & error)
- if isClusterMismatched(record, conf.clusterId):
+ if isClusterMismatched(record, wakuConf.clusterId):
error "cluster id mismatch configured shards"
return err("cluster id mismatch configured shards")
@@ -504,21 +510,21 @@ proc setupNode*(
## Peer persistence
var peerStore: Option[WakuPeerStorage]
- if conf.peerPersistence:
+ if wakuConf.peerPersistence:
peerStore = setupPeerStorage().valueOr:
error "Setting up storage failed", error = "failed to setup peer store " & error
return err("Setting up storage failed: " & error)
debug "Initializing node"
- let node = initNode(conf, netConfig, rng, key, record, peerStore, relay).valueOr:
+ let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr:
error "Initializing node failed", error = error
return err("Initializing node failed: " & error)
debug "Mounting protocols"
try:
- (waitFor node.setupProtocols(conf, key)).isOkOr:
+ (waitFor node.setupProtocols(wakuConf)).isOkOr:
error "Mounting protocols failed", error = error
return err("Mounting protocols failed: " & error)
except CatchableError:
diff --git a/waku/factory/validator_signed.nim b/waku/factory/validator_signed.nim
index 59ee384b1..0da380ab5 100644
--- a/waku/factory/validator_signed.nim
+++ b/waku/factory/validator_signed.nim
@@ -13,7 +13,7 @@ import
const MessageWindowInSec = 5 * 60 # +- 5 minutes
-import ./external_config, ../waku_relay/protocol, ../waku_core
+import ./waku_conf, ../waku_relay/protocol, ../waku_core
declarePublicCounter waku_msg_validator_signed_outcome,
"number of messages for each validation outcome", ["result"]
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index 854df8dde..01dc7a36f 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -38,12 +38,12 @@ import
../waku_rln_relay,
../waku_store,
../waku_filter_v2,
- ../factory/networks_config,
../factory/node_factory,
../factory/internal_config,
../factory/external_config,
../factory/app_callbacks,
- ../waku_enr/multiaddr
+ ../waku_enr/multiaddr,
+ ./waku_conf
logScope:
topics = "wakunode waku"
@@ -53,13 +53,15 @@ const git_version* {.strdefine.} = "n/a"
type Waku* = ref object
version: string
- conf: WakuNodeConf
- rng: ref HmacDrbgContext
+ conf*: WakuConf
+ rng*: ref HmacDrbgContext
+
key: crypto.PrivateKey
wakuDiscv5*: WakuDiscoveryV5
- dynamicBootstrapNodes: seq[RemotePeerInfo]
+ dynamicBootstrapNodes*: seq[RemotePeerInfo]
dnsRetryLoopHandle: Future[void]
+ networkConnLoopHandle: Future[void]
discoveryMngr: DiscoveryManager
node*: WakuNode
@@ -70,37 +72,11 @@ type Waku* = ref object
metricsServer*: MetricsHttpServerRef
appCallbacks*: AppCallbacks
-proc logConfig(conf: WakuNodeConf) =
- info "Configuration: Enabled protocols",
- relay = conf.relay,
- rlnRelay = conf.rlnRelay,
- store = conf.store,
- filter = conf.filter,
- lightpush = conf.lightpush,
- peerExchange = conf.peerExchange
-
- info "Configuration. Network", cluster = conf.clusterId
-
- for shard in conf.shards:
- info "Configuration. Shards", shard = shard
-
- for i in conf.discv5BootstrapNodes:
- info "Configuration. Bootstrap nodes", node = i
-
- if conf.rlnRelay and conf.rlnRelayDynamic:
- info "Configuration. Validation",
- mechanism = "onchain rln",
- contract = conf.rlnRelayEthContractAddress,
- maxMessageSize = conf.maxMessageSize,
- rlnEpochSizeSec = conf.rlnEpochSizeSec,
- rlnRelayUserMessageLimit = conf.rlnRelayUserMessageLimit,
- rlnRelayEthClientAddress = string(conf.rlnRelayEthClientAddress)
-
func version*(waku: Waku): string =
waku.version
proc setupSwitchServices(
- waku: Waku, conf: WakuNodeConf, circuitRelay: Relay, rng: ref HmacDrbgContext
+ waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext
) =
proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
debug "circuit relay handler new reserve event",
@@ -116,7 +92,7 @@ proc setupSwitchServices(
error "failed to update announced multiaddress", error = $error
let autonatService = getAutonatService(rng)
- if conf.isRelayClient:
+ if conf.circuitRelayClient:
## The node is considered to be behind a NAT or firewall and then it
## should struggle to be reachable and establish connections to other nodes
const MaxNumRelayServers = 2
@@ -131,12 +107,13 @@ proc setupSwitchServices(
## Initialisation
proc newCircuitRelay(isRelayClient: bool): Relay =
+ # TODO: Does it mean it's a circuit-relay server when it's false?
if isRelayClient:
return RelayClient.new()
return Relay.new()
proc setupAppCallbacks(
- node: WakuNode, conf: WakuNodeConf, appCallbacks: AppCallbacks
+ node: WakuNode, conf: WakuConf, appCallbacks: AppCallbacks
): Result[void, string] =
if appCallbacks.isNil():
info "No external callbacks to be set"
@@ -171,76 +148,35 @@ proc setupAppCallbacks(
return ok()
proc new*(
- T: type Waku, confCopy: var WakuNodeConf, appCallbacks: AppCallbacks = nil
+ T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil
): Result[Waku, string] =
let rng = crypto.newRng()
- logging.setupLog(confCopy.logLevel, confCopy.logFormat)
+ logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
- # TODO: remove after pubsubtopic config gets removed
- var shards = newSeq[uint16]()
- if confCopy.pubsubTopics.len > 0:
- let shardsRes = topicsToRelayShards(confCopy.pubsubTopics)
- if shardsRes.isErr():
- error "failed to parse pubsub topic, please format according to static shard specification",
- error = shardsRes.error
- return err("failed to parse pubsub topic: " & $shardsRes.error)
+ ?wakuConf.validate()
- let shardsOpt = shardsRes.get()
-
- if shardsOpt.isSome():
- let relayShards = shardsOpt.get()
- if relayShards.clusterId != confCopy.clusterId:
- error "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22",
- nodeCluster = confCopy.clusterId, pubsubCluster = relayShards.clusterId
- return err(
- "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22"
- )
-
- for shard in relayShards.shardIds:
- shards.add(shard)
- confCopy.shards = shards
-
- # Why can't I replace this block with a concise `.valueOr`?
- confCopy = block:
- let res = applyPresetConfiguration(confCopy)
- if res.isErr():
- error "Failed to complete the config", error = res.error
- return err("Failed to complete the config:" & $res.error)
- res.get()
-
- logConfig(confCopy)
+ wakuConf.logConf()
info "Running nwaku node", version = git_version
- let validateShardsRes = validateShards(confCopy)
- if validateShardsRes.isErr():
- error "Failed validating shards", error = $validateShardsRes.error
- return err("Failed validating shards: " & $validateShardsRes.error)
+ var relay = newCircuitRelay(wakuConf.circuitRelayClient)
- let keyRes = getNodeKey(confCopy, rng)
- if keyRes.isErr():
- error "Failed to generate key", error = $keyRes.error
- return err("Failed to generate key: " & $keyRes.error)
- confCopy.nodeKey = some(keyRes.get())
-
- var relay = newCircuitRelay(confCopy.isRelayClient)
-
- let nodeRes = setupNode(confCopy, rng, relay)
+ let nodeRes = setupNode(wakuConf, rng, relay)
if nodeRes.isErr():
error "Failed setting up node", error = nodeRes.error
return err("Failed setting up node: " & nodeRes.error)
let node = nodeRes.get()
- node.setupAppCallbacks(confCopy, appCallbacks).isOkOr:
+ node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr:
error "Failed setting up app callbacks", error = error
return err("Failed setting up app callbacks: " & $error)
## Delivery Monitor
var deliveryMonitor: DeliveryMonitor
- if confCopy.reliabilityEnabled:
- if confCopy.storenode == "":
+ if wakuConf.p2pReliability:
+ if wakuConf.remoteStoreNode.isNone():
return err("A storenode should be set when reliability mode is on")
let deliveryMonitorRes = DeliveryMonitor.new(
@@ -253,16 +189,15 @@ proc new*(
var waku = Waku(
version: git_version,
- # TODO: WakuNodeConf is re-used for too many context, `conf` here should be a dedicated subtype
- conf: confCopy,
+ conf: wakuConf,
rng: rng,
- key: confCopy.nodekey.get(),
+ key: wakuConf.nodeKey,
node: node,
deliveryMonitor: deliveryMonitor,
appCallbacks: appCallbacks,
)
- waku.setupSwitchServices(confCopy, relay, rng)
+ waku.setupSwitchServices(wakuConf, relay, rng)
ok(waku)
@@ -287,16 +222,19 @@ proc getPorts(
proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
var conf = waku[].conf
let (tcpPort, websocketPort) = getPorts(waku[].node.switch.peerInfo.listenAddrs).valueOr:
- return err("Could not retrieve ports " & error)
+ return err("Could not retrieve ports: " & error)
if tcpPort.isSome():
- conf.tcpPort = tcpPort.get()
+ conf.networkConf.p2pTcpPort = tcpPort.get()
- if websocketPort.isSome():
- conf.websocketPort = websocketPort.get()
+ if websocketPort.isSome() and conf.webSocketConf.isSome():
+ conf.webSocketConf.get().port = websocketPort.get()
# Rebuild NetConfig with bound port values
- let netConf = networkConfiguration(conf, clientId).valueOr:
+ let netConf = networkConfiguration(
+ conf.clusterId, conf.networkConf, conf.discv5Conf, conf.webSocketConf,
+ conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
+ ).valueOr:
return err("Could not update NetConfig: " & error)
return ok(netConf)
@@ -304,12 +242,11 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
proc updateEnr(waku: ptr Waku): Result[void, string] =
let netConf: NetConfig = getRunningNetConfig(waku).valueOr:
return err("error calling updateNetConfig: " & $error)
-
- let record = enrConfiguration(waku[].conf, netConf, waku[].key).valueOr:
+ let record = enrConfiguration(waku[].conf, netConf).valueOr:
return err("ENR setup failed: " & error)
if isClusterMismatched(record, waku[].conf.clusterId):
- return err("cluster id mismatch configured shards")
+ return err("cluster-id mismatch configured shards")
waku[].node.enr = record
@@ -344,7 +281,9 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
return ok()
proc updateWaku(waku: ptr Waku): Result[void, string] =
- if waku[].conf.tcpPort == Port(0) or waku[].conf.websocketPort == Port(0):
+ let conf = waku[].conf
+ if conf.networkConf.p2pTcpPort == Port(0) or
+ (conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)):
updateEnr(waku).isOkOr:
return err("error calling updateEnr: " & $error)
@@ -357,15 +296,17 @@ proc updateWaku(waku: ptr Waku): Result[void, string] =
proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
while true:
await sleepAsync(30.seconds)
- let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
- waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers
- )
- if dynamicBootstrapNodesRes.isErr():
- error "Retrieving dynamic bootstrap nodes failed",
- error = dynamicBootstrapNodesRes.error
- continue
+ if waku.conf.dnsDiscoveryConf.isSome():
+ let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
+ let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
+ dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
+ )
+ if dynamicBootstrapNodesRes.isErr():
+ error "Retrieving dynamic bootstrap nodes failed",
+ error = dynamicBootstrapNodesRes.error
+ continue
- waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
+ waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
if not waku[].wakuDiscv5.isNil():
let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes
@@ -389,22 +330,34 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()
return
+# The network connectivity loop checks periodically whether the node is online or not
+# and triggers any change that depends on the network connectivity state
+proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} =
+ while true:
+ await sleepAsync(15.seconds)
+
+ # Update online state
+ await waku.node.peerManager.updateOnlineState()
+
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
debug "Retrieve dynamic bootstrap nodes"
+ let conf = waku[].conf
- let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
- waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers
- )
+ if conf.dnsDiscoveryConf.isSome():
+ let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
+ let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
+ dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
+ )
- if dynamicBootstrapNodesRes.isErr():
- error "Retrieving dynamic bootstrap nodes failed",
- error = dynamicBootstrapNodesRes.error
- # Start Dns Discovery retry loop
- waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop()
- else:
- waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
+ if dynamicBootstrapNodesRes.isErr():
+ error "Retrieving dynamic bootstrap nodes failed",
+ error = dynamicBootstrapNodesRes.error
+ # Start Dns Discovery retry loop
+ waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop()
+ else:
+ waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
- if not waku[].conf.discv5Only:
+ if conf.discv5Conf.isNone or not conf.discv5Conf.get().discv5Only:
(await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
return err("error while calling startNode: " & $error)
@@ -413,10 +366,17 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err("Error in updateApp: " & $error)
## Discv5
- if waku[].conf.discv5Discovery or waku[].conf.discv5Only:
+ if conf.discv5Conf.isSome:
waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5(
- waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, waku.conf,
- waku.dynamicBootstrapNodes, waku.rng, waku.key,
+ waku.node.enr,
+ waku.node.peerManager,
+ waku.node.topicSubscriptionQueue,
+ conf.discv5Conf.get(),
+ waku.dynamicBootstrapNodes,
+ waku.rng,
+ conf.nodeKey,
+ conf.networkConf.p2pListenAddress,
+ conf.portsShift,
)
(await waku.wakuDiscV5.start()).isOkOr:
@@ -426,6 +386,9 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
if not waku[].deliveryMonitor.isNil():
waku[].deliveryMonitor.startDeliveryMonitor()
+ # Start network connectivity check loop
+ waku[].networkConnLoopHandle = waku[].startNetworkConnectivityLoop()
+
return ok()
# Waku shutdown
@@ -437,6 +400,9 @@ proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} =
if not waku.metricsServer.isNil():
await waku.metricsServer.stop()
+ if not waku.networkConnLoopHandle.isNil():
+ await waku.networkConnLoopHandle.cancelAndWait()
+
if not waku.wakuDiscv5.isNil():
await waku.wakuDiscv5.stop()
diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim
new file mode 100644
index 000000000..94b89a26e
--- /dev/null
+++ b/waku/factory/waku_conf.nim
@@ -0,0 +1,243 @@
+import
+ std/[net, options, strutils],
+ chronicles,
+ libp2p/crypto/crypto,
+ libp2p/multiaddress,
+ secp256k1,
+ results
+
+import
+ ../waku_rln_relay/rln_relay,
+ ../waku_api/rest/builder,
+ ../discovery/waku_discv5,
+ ../node/waku_metrics,
+ ../common/logging,
+ ../waku_enr/capabilities,
+ ./network_conf
+
+export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf
+
+logScope:
+ topics = "waku conf"
+
+# TODO: should be defined in validator_signed.nim and imported here
+type ProtectedShard* {.requiresInit.} = object
+ shard*: uint16
+ key*: secp256k1.SkPublicKey
+
+type DnsDiscoveryConf* {.requiresInit.} = object
+ enrTreeUrl*: string
+ # TODO: should probably only have one set of name servers (see dnsaddrs)
+ nameServers*: seq[IpAddress]
+
+type StoreSyncConf* {.requiresInit.} = object
+ rangeSec*: uint32
+ intervalSec*: uint32
+ relayJitterSec*: uint32
+
+type StoreServiceConf* {.requiresInit.} = object
+ dbMigration*: bool
+ dbURl*: string
+ dbVacuum*: bool
+ supportV2*: bool
+ maxNumDbConnections*: int
+ retentionPolicy*: string
+ resume*: bool
+ storeSyncConf*: Option[StoreSyncConf]
+
+type FilterServiceConf* {.requiresInit.} = object
+ maxPeersToServe*: uint32
+ subscriptionTimeout*: uint16
+ maxCriteria*: uint32
+
+type NetworkConfig* = object # TODO: make enum
+ natStrategy*: string
+ p2pTcpPort*: Port
+ dns4DomainName*: Option[string]
+ p2pListenAddress*: IpAddress
+ extMultiAddrs*: seq[MultiAddress]
+ extMultiAddrsOnly*: bool
+
+## `WakuConf` is a valid configuration for a Waku node
+## All information needed by a waku node should be contained
+## In this object. A convenient `validate` method enables doing
+## sanity checks beyond type enforcement.
+## If `Option` is `some` it means the related protocol is enabled.
+type WakuConf* {.requiresInit.} = ref object
+ # ref because `getRunningNetConfig` modifies it
+ nodeKey*: crypto.PrivateKey
+
+ clusterId*: uint16
+ shards*: seq[uint16]
+ protectedShards*: seq[ProtectedShard]
+
+ # TODO: move to an autoShardingConf
+ numShardsInNetwork*: uint32
+ contentTopics*: seq[string]
+
+ relay*: bool
+ lightPush*: bool
+ peerExchange*: bool
+
+ # TODO: remove relay peer exchange
+ relayPeerExchange*: bool
+ rendezvous*: bool
+ circuitRelayClient*: bool
+ keepAlive*: bool
+
+ discv5Conf*: Option[Discv5Conf]
+ dnsDiscoveryConf*: Option[DnsDiscoveryConf]
+ filterServiceConf*: Option[FilterServiceConf]
+ storeServiceConf*: Option[StoreServiceConf]
+ rlnRelayConf*: Option[RlnRelayConf]
+ restServerConf*: Option[RestServerConf]
+ metricsServerConf*: Option[MetricsServerConf]
+ webSocketConf*: Option[WebSocketConf]
+
+ portsShift*: uint16
+ dnsAddrs*: bool
+ dnsAddrsNameServers*: seq[IpAddress]
+ networkConf*: NetworkConfig
+ wakuFlags*: CapabilitiesBitfield
+
+ # TODO: could probably make it a `PeerRemoteInfo`
+ staticNodes*: seq[string]
+ remoteStoreNode*: Option[string]
+ remoteLightPushNode*: Option[string]
+ remoteFilterNode*: Option[string]
+ remotePeerExchangeNode*: Option[string]
+
+ maxMessageSizeBytes*: uint64
+
+ logLevel*: logging.LogLevel
+ logFormat*: logging.LogFormat
+
+ peerPersistence*: bool
+ # TODO: should clearly be a uint
+ peerStoreCapacity*: Option[int]
+ # TODO: should clearly be a uint
+ maxConnections*: int
+
+ agentString*: string
+
+ colocationLimit*: int
+
+ # TODO: use proper type
+ rateLimits*: seq[string]
+
+ # TODO: those could be in a relay conf object
+ maxRelayPeers*: Option[int]
+ relayShardedPeerManagement*: bool
+ # TODO: use proper type
+ relayServiceRatio*: string
+
+ p2pReliability*: bool
+
+proc logConf*(conf: WakuConf) =
+ info "Configuration: Enabled protocols",
+ relay = conf.relay,
+ rlnRelay = conf.rlnRelayConf.isSome(),
+ store = conf.storeServiceConf.isSome(),
+ filter = conf.filterServiceConf.isSome(),
+ lightPush = conf.lightPush,
+ peerExchange = conf.peerExchange
+
+ info "Configuration. Network", cluster = conf.clusterId
+
+ for shard in conf.shards:
+ info "Configuration. Shards", shard = shard
+
+ if conf.discv5Conf.isSome():
+ for i in conf.discv5Conf.get().bootstrapNodes:
+ info "Configuration. Bootstrap nodes", node = i.string
+
+ if conf.rlnRelayConf.isSome():
+ var rlnRelayConf = conf.rlnRelayConf.get()
+ if rlnRelayConf.dynamic:
+ info "Configuration. Validation",
+ mechanism = "onchain rln",
+ contract = rlnRelayConf.ethContractAddress.string,
+ maxMessageSize = conf.maxMessageSizeBytes,
+ rlnEpochSizeSec = rlnRelayConf.epochSizeSec,
+ rlnRelayUserMessageLimit = rlnRelayConf.userMessageLimit,
+ ethClientUrls = rlnRelayConf.ethClientUrls
+
+proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =
+ wakuConf.nodeKey.getPublicKey().isOkOr:
+ return err("nodekey param is invalid")
+ return ok()
+
+proc validateShards(wakuConf: WakuConf): Result[void, string] =
+ let numShardsInNetwork = wakuConf.numShardsInNetwork
+
+ # TODO: fix up this behaviour
+ if numShardsInNetwork == 0:
+ return ok()
+
+ for shard in wakuConf.shards:
+ if shard >= numShardsInNetwork:
+ let msg =
+ "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
+ $numShardsInNetwork # fmt doesn't work
+ error "validateShards failed", error = msg
+ return err(msg)
+
+ return ok()
+
+proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
+ if wakuConf.networkConf.dns4DomainName.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string):
+ return err("dns4-domain-name is an empty string, set it to none(string) instead")
+
+ if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):
+ return err("relay-service-ratio is an empty string")
+
+ for sn in wakuConf.staticNodes:
+ if isEmptyOrWhiteSpace(sn):
+ return err("staticnode contain an empty string")
+
+ if wakuConf.remoteStoreNode.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.remoteStoreNode.get()):
+ return err("storenode is an empty string, set it to none(string) instead")
+
+ if wakuConf.remoteLightPushNode.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.remoteLightPushNode.get()):
+ return err("lightpushnode is an empty string, set it to none(string) instead")
+
+ if wakuConf.remotePeerExchangeNode.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.remotePeerExchangeNode.get()):
+ return err("peer-exchange-node is an empty string, set it to none(string) instead")
+
+ if wakuConf.remoteFilterNode.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.remoteFilterNode.get()):
+ return err("filternode is an empty string, set it to none(string) instead")
+
+ if wakuConf.dnsDiscoveryConf.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.dnsDiscoveryConf.get().enrTreeUrl):
+ return err("dns-discovery-url is an empty string")
+
+ # TODO: rln relay config should validate itself
+ if wakuConf.rlnRelayConf.isSome():
+ let rlnRelayConf = wakuConf.rlnRelayConf.get()
+
+ if isEmptyOrWhiteSpace(rlnRelayConf.treePath):
+ return err("rln-relay-tree-path is an empty string")
+ if rlnRelayConf.ethClientUrls.len == 0:
+ return err("rln-relay-eth-client-address is empty")
+ if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress):
+ return err("rln-relay-eth-contract-address is an empty string")
+
+ if rlnRelayConf.creds.isSome():
+ let creds = rlnRelayConf.creds.get()
+ if isEmptyOrWhiteSpace(creds.path):
+ return err ("rln-relay-cred-path is an empty string")
+ if isEmptyOrWhiteSpace(creds.password):
+ return err ("rln-relay-cred-password is an empty string")
+
+ return ok()
+
+proc validate*(wakuConf: WakuConf): Result[void, string] =
+ ?wakuConf.validateNodeKey()
+ ?wakuConf.validateShards()
+ ?wakuConf.validateNoEmptyStrings()
+ return ok()
diff --git a/waku/incentivization/common.nim b/waku/incentivization/common.nim
index 4dd1d3508..533836e42 100644
--- a/waku/incentivization/common.nim
+++ b/waku/incentivization/common.nim
@@ -1,6 +1,6 @@
import std/options
-import waku/incentivization/[rpc, eligibility_manager]
+import waku/incentivization/rpc
type EligibilityStatusCode* = enum
SUCCESS = uint32(200)
diff --git a/waku/incentivization/eligibility_manager.nim b/waku/incentivization/eligibility_manager.nim
index da8280da3..b10b293e1 100644
--- a/waku/incentivization/eligibility_manager.nim
+++ b/waku/incentivization/eligibility_manager.nim
@@ -1,6 +1,6 @@
import std/[options, sets], chronos, web3, stew/byteutils, stint, results, chronicles
-import waku/incentivization/rpc, tests/waku_rln_relay/[utils_onchain, utils]
+import waku/incentivization/rpc, tests/waku_rln_relay/utils_onchain
const SimpleTransferGasUsed = Quantity(21000)
const TxReceiptQueryTimeout = 3.seconds
diff --git a/waku/incentivization/rpc_codec.nim b/waku/incentivization/rpc_codec.nim
index 5d3ce48d5..9529ddffe 100644
--- a/waku/incentivization/rpc_codec.nim
+++ b/waku/incentivization/rpc_codec.nim
@@ -1,5 +1,5 @@
import std/options
-import ../common/protobuf, ../waku_core, ./rpc
+import ../common/protobuf, ./rpc
# Codec for EligibilityProof
diff --git a/waku/node/config.nim b/waku/node/net_config.nim
similarity index 88%
rename from waku/node/config.nim
rename to waku/node/net_config.nim
index 311e26771..a45d95f92 100644
--- a/waku/node/config.nim
+++ b/waku/node/net_config.nim
@@ -15,6 +15,7 @@ type NetConfig* = object
extIp*: Option[IpAddress]
extPort*: Option[Port]
dns4DomainName*: Option[string]
+ dnsNameServers*: seq[IpAddress]
announcedAddresses*: seq[MultiAddress]
extMultiAddrs*: seq[MultiAddress]
enrMultiAddrs*: seq[MultiAddress]
@@ -60,6 +61,8 @@ proc isWsAddress*(ma: MultiAddress): bool =
proc containsWsAddress(extMultiAddrs: seq[MultiAddress]): bool =
return extMultiAddrs.filterIt(it.isWsAddress()).len > 0
+const DefaultWsBindPort = static(Port(8000))
+# TODO: migrate to builder pattern with nested configs
proc init*(
T: type NetConfig,
bindIp: IpAddress,
@@ -68,13 +71,14 @@ proc init*(
extPort = none(Port),
extMultiAddrs = newSeq[MultiAddress](),
extMultiAddrsOnly: bool = false,
- wsBindPort: Port = Port(8000),
+ wsBindPort: Option[Port] = some(DefaultWsBindPort),
wsEnabled: bool = false,
wssEnabled: bool = false,
dns4DomainName = none(string),
discv5UdpPort = none(Port),
clusterId: uint16 = 0,
wakuFlags = none(CapabilitiesBitfield),
+ dnsNameServers = newSeq[IpAddress](),
): NetConfigResult =
## Initialize and validate waku node network configuration
@@ -84,7 +88,9 @@ proc init*(
var wsHostAddress = none(MultiAddress)
if wsEnabled or wssEnabled:
try:
- wsHostAddress = some(ip4TcpEndPoint(bindIp, wsbindPort) & wsFlag(wssEnabled))
+ wsHostAddress = some(
+ ip4TcpEndPoint(bindIp, wsbindPort.get(DefaultWsBindPort)) & wsFlag(wssEnabled)
+ )
except CatchableError:
return err(getCurrentExceptionMsg())
@@ -111,8 +117,10 @@ proc init*(
if wsHostAddress.isSome():
try:
- wsExtAddress =
- some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled))
+ wsExtAddress = some(
+ dns4TcpEndPoint(dns4DomainName.get(), wsBindPort.get(DefaultWsBindPort)) &
+ wsFlag(wssEnabled)
+ )
except CatchableError:
return err(getCurrentExceptionMsg())
else:
@@ -122,8 +130,10 @@ proc init*(
if wsHostAddress.isSome():
try:
- wsExtAddress =
- some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled))
+ wsExtAddress = some(
+ ip4TcpEndPoint(extIp.get(), wsBindPort.get(DefaultWsBindPort)) &
+ wsFlag(wssEnabled)
+ )
except CatchableError:
return err(getCurrentExceptionMsg())
@@ -165,6 +175,7 @@ proc init*(
extPort: extPort,
wssEnabled: wssEnabled,
dns4DomainName: dns4DomainName,
+ dnsNameServers: dnsNameServers,
announcedAddresses: announcedAddresses,
extMultiAddrs: extMultiAddrs,
enrMultiaddrs: enrMultiaddrs,
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 651a107b4..4c1a9a4f9 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -8,6 +8,7 @@ import
libp2p/multistream,
libp2p/muxers/muxer,
libp2p/nameresolving/nameresolver,
+ libp2p/nameresolving/dnsresolver,
libp2p/peerstore
import
@@ -74,13 +75,14 @@ const
# Max peers that we allow from the same IP
DefaultColocationLimit* = 5
+ DNSCheckDomain = "one.one.one.one"
+
type ConnectionChangeHandler* = proc(
peerId: PeerId, peerEvent: PeerEventKind
): Future[void] {.gcsafe, raises: [Defect].}
type PeerManager* = ref object of RootObj
switch*: Switch
- wakuPeerStore*: WakuPeerStore
wakuMetadata*: WakuMetadata
initialBackoffInSec*: int
backoffFactor*: int
@@ -101,11 +103,16 @@ type PeerManager* = ref object of RootObj
reputationManager*: Option[ReputationManager]
# servers of light protocols (like Lightpush) may track client requests' eligibility
eligibilityManager*: Option[EligibilityManager]
+ dnsNameServers*: seq[IpAddress]
+ online: bool
#~~~~~~~~~~~~~~~~~~~#
# Helper Functions #
#~~~~~~~~~~~~~~~~~~~#
+template isOnline*(self: PeerManager): bool =
+ self.online
+
proc calculateBackoff(
initialBackoffInSec: int, backoffFactor: int, failedAttempts: int
): timer.Duration =
@@ -143,38 +150,13 @@ proc addPeer*(
trace "skipping to manage our unmanageable self"
return
- if pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and
- pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] == remotePeerInfo.publicKey and
- pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0:
- let incomingEnr = remotePeerInfo.enr.valueOr:
- trace "peer already managed and incoming ENR is empty",
- remote_peer_id = $remotePeerInfo.peerId
- return
-
- if pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw == incomingEnr.raw or
- pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].seqNum > incomingEnr.seqNum:
- trace "peer already managed and ENR info is already saved",
- remote_peer_id = $remotePeerInfo.peerId
- return
+ pm.switch.peerStore.addPeer(remotePeerInfo, origin)
trace "Adding peer to manager",
- peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs
+ peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs, origin
waku_total_unique_peers.inc()
- pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] = remotePeerInfo.addrs
- pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] = remotePeerInfo.publicKey
- pm.wakuPeerStore[SourceBook][remotePeerInfo.peerId] = origin
- pm.wakuPeerStore[ProtoVersionBook][remotePeerInfo.peerId] =
- remotePeerInfo.protoVersion
- pm.wakuPeerStore[AgentBook][remotePeerInfo.peerId] = remotePeerInfo.agent
-
- if remotePeerInfo.protocols.len > 0:
- pm.wakuPeerStore[ProtoBook][remotePeerInfo.peerId] = remotePeerInfo.protocols
-
- if remotePeerInfo.enr.isSome():
- pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId] = remotePeerInfo.enr.get()
-
# Add peer to storage. Entry will subsequently be updated with connectedness information
if not pm.storage.isNil:
# Reading from the db (pm.storage) is only done on startup, hence you need to connect to all saved peers.
@@ -185,6 +167,9 @@ proc addPeer*(
pm.storage.insertOrReplace(remotePeerInfo)
+proc getPeer*(pm: PeerManager, peerId: PeerId): RemotePeerInfo =
+ return pm.switch.peerStore.getPeer(peerId)
+
proc loadFromStorage(pm: PeerManager) {.gcsafe.} =
## Load peers from storage, if available
@@ -207,19 +192,20 @@ proc loadFromStorage(pm: PeerManager) {.gcsafe.} =
version = remotePeerInfo.protoVersion
# nim-libp2p books
- pm.wakuPeerStore[AddressBook][peerId] = remotePeerInfo.addrs
- pm.wakuPeerStore[ProtoBook][peerId] = remotePeerInfo.protocols
- pm.wakuPeerStore[KeyBook][peerId] = remotePeerInfo.publicKey
- pm.wakuPeerStore[AgentBook][peerId] = remotePeerInfo.agent
- pm.wakuPeerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion
+ pm.switch.peerStore[AddressBook][peerId] = remotePeerInfo.addrs
+ pm.switch.peerStore[ProtoBook][peerId] = remotePeerInfo.protocols
+ pm.switch.peerStore[KeyBook][peerId] = remotePeerInfo.publicKey
+ pm.switch.peerStore[AgentBook][peerId] = remotePeerInfo.agent
+ pm.switch.peerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion
# custom books
- pm.wakuPeerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state
- pm.wakuPeerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime
- pm.wakuPeerStore[SourceBook][peerId] = remotePeerInfo.origin
+ pm.switch.peerStore[ConnectionBook][peerId] = NotConnected
+ # Reset connectedness state
+ pm.switch.peerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime
+ pm.switch.peerStore[SourceBook][peerId] = remotePeerInfo.origin
if remotePeerInfo.enr.isSome():
- pm.wakuPeerStore[ENRBook][peerId] = remotePeerInfo.enr.get()
+ pm.switch.peerStore[ENRBook][peerId] = remotePeerInfo.enr.get()
amount.inc()
@@ -233,10 +219,11 @@ proc loadFromStorage(pm: PeerManager) {.gcsafe.} =
proc selectPeer*(
pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic)
): Option[RemotePeerInfo] =
- trace "Selecting peer from peerstore", protocol = proto
-
# Selects the best peer for a given protocol
- var peers = pm.wakuPeerStore.getPeersByProtocol(proto)
+
+ var peers = pm.switch.peerStore.getPeersByProtocol(proto)
+ trace "Selecting peer from peerstore",
+ protocol = proto, peers, address = cast[uint](pm.switch.peerStore)
if shard.isSome():
peers.keepItIf((it.enr.isSome() and it.enr.get().containsShard(shard.get())))
@@ -321,14 +308,16 @@ proc connectPeer*(
): Future[bool] {.async.} =
let peerId = peer.peerId
+ var peerStore = pm.switch.peerStore
+
# Do not attempt to dial self
if peerId == pm.switch.peerInfo.peerId:
return false
- if not pm.wakuPeerStore.peerExists(peerId):
+ if not peerStore.peerExists(peerId):
pm.addPeer(peer)
- let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]
+ let failedAttempts = peerStore[NumberFailedConnBook][peerId]
trace "Connecting to peer",
wireAddr = peer.addrs, peerId = peerId, failedAttempts = failedAttempts
@@ -352,20 +341,19 @@ proc connectPeer*(
waku_peers_dials.inc(labelValues = ["successful"])
waku_node_conns_initiated.inc(labelValues = [source])
- pm.wakuPeerStore[NumberFailedConnBook][peerId] = 0
+ peerStore[NumberFailedConnBook][peerId] = 0
return true
# Dial failed
- pm.wakuPeerStore[NumberFailedConnBook][peerId] =
- pm.wakuPeerStore[NumberFailedConnBook][peerId] + 1
- pm.wakuPeerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second)
- pm.wakuPeerStore[ConnectionBook][peerId] = CannotConnect
+ peerStore[NumberFailedConnBook][peerId] = peerStore[NumberFailedConnBook][peerId] + 1
+ peerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second)
+ peerStore[ConnectionBook][peerId] = CannotConnect
trace "Connecting peer failed",
peerId = peerId,
reason = reasonFailed,
- failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]
+ failedAttempts = peerStore[NumberFailedConnBook][peerId]
waku_peers_dials.inc(labelValues = [reasonFailed])
return false
@@ -472,7 +460,7 @@ proc dialPeer*(
# First add dialed peer info to peer store, if it does not exist yet..
# TODO: nim libp2p peerstore already adds them
- if not pm.wakuPeerStore.hasPeer(remotePeerInfo.peerId, proto):
+ if not pm.switch.peerStore.hasPeer(remotePeerInfo.peerId, proto):
trace "Adding newly dialed peer to manager",
peerId = $remotePeerInfo.peerId, address = $remotePeerInfo.addrs[0], proto = proto
pm.addPeer(remotePeerInfo)
@@ -498,7 +486,8 @@ proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool =
# Returns if we can try to connect to this peer, based on past failed attempts
# It uses an exponential backoff. Each connection attempt makes us
# wait more before trying again.
- let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId]
+ let peerStore = pm.switch.peerStore
+ let failedAttempts = peerStore[NumberFailedConnBook][peerId]
# if it never errored, we can try to connect
if failedAttempts == 0:
@@ -511,7 +500,7 @@ proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool =
# If it errored we wait an exponential backoff from last connection
# the more failed attempts, the greater the backoff since last attempt
let now = Moment.init(getTime().toUnix, Second)
- let lastFailed = pm.wakuPeerStore[LastFailedConnBook][peerId]
+ let lastFailed = peerStore[LastFailedConnBook][peerId]
let backoff =
calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts)
@@ -573,7 +562,38 @@ proc getStreamByPeerIdAndProtocol*(
return ok(streamRes.get())
+proc checkInternetConnectivity(
+ nameServerIps: seq[IpAddress], timeout = 2.seconds
+): Future[bool] {.async.} =
+ var nameServers: seq[TransportAddress]
+ for ip in nameServerIps:
+ nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
+
+ let dnsResolver = DnsResolver.new(nameServers)
+
+ # Resolve domain IP
+ let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC)
+
+ if resolved.len > 0:
+ return true
+ else:
+ return false
+
+proc updateOnlineState*(pm: PeerManager) {.async.} =
+ let numConnectedPeers =
+ pm.switch.peerStore.peers().countIt(it.connectedness == Connected)
+
+ if numConnectedPeers > 0:
+ pm.online = true
+ else:
+ pm.online = await checkInternetConnectivity(pm.dnsNameServers)
+
proc connectToRelayPeers*(pm: PeerManager) {.async.} =
+ # only attempt if current node is online
+ if not pm.isOnline():
+ error "connectToRelayPeers: won't attempt new connections - node is offline"
+ return
+
var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
let totalRelayPeers = inRelayPeers.len + outRelayPeers.len
@@ -583,7 +603,7 @@ proc connectToRelayPeers*(pm: PeerManager) {.async.} =
if outRelayPeers.len >= pm.outRelayPeersTarget:
return
- let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers()
+ let notConnectedPeers = pm.switch.peerStore.getDisconnectedPeers()
var outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId))
@@ -612,7 +632,7 @@ proc reconnectPeers*(
debug "Reconnecting peers", proto = proto
# Proto is not persisted, we need to iterate over all peers.
- for peerInfo in pm.wakuPeerStore.peers(protocolMatcher(proto)):
+ for peerInfo in pm.switch.peerStore.peers(protocolMatcher(proto)):
# Check that the peer can be connected
if peerInfo.connectedness == CannotConnect:
error "Not reconnecting to unreachable or non-existing peer",
@@ -685,7 +705,7 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} =
break guardClauses
if (
- pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec) and
+ pm.switch.peerStore.hasPeer(peerId, WakuRelayCodec) and
not metadata.shards.anyIt(pm.wakuMetadata.shards.contains(it))
):
let myShardsString = "[ " & toSeq(pm.wakuMetadata.shards).join(", ") & " ]"
@@ -699,13 +719,14 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} =
info "disconnecting from peer", peerId = peerId, reason = reason
asyncSpawn(pm.switch.disconnect(peerId))
- pm.wakuPeerStore.delete(peerId)
+ pm.switch.peerStore.delete(peerId)
# called when a peer i) first connects to us ii) disconnects all connections from us
proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
if not pm.wakuMetadata.isNil() and event.kind == PeerEventKind.Joined:
await pm.onPeerMetadata(peerId)
+ var peerStore = pm.switch.peerStore
var direction: PeerDirection
var connectedness: Connectedness
@@ -717,7 +738,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
## Check max allowed in-relay peers
let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0]
if inRelayPeers.len > pm.inRelayPeersTarget and
- pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec):
+ peerStore.hasPeer(peerId, WakuRelayCodec):
debug "disconnecting relay peer because reached max num in-relay peers",
peerId = peerId,
inRelayPeers = inRelayPeers.len,
@@ -736,7 +757,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]:
debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
asyncSpawn(pm.switch.disconnect(peerId))
- pm.wakuPeerStore.delete(peerId)
+ peerStore.delete(peerId)
if not pm.onConnectionChange.isNil():
# we don't want to await for the callback to finish
asyncSpawn pm.onConnectionChange(peerId, Joined)
@@ -757,11 +778,11 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
of Identified:
debug "event identified", peerId = peerId
- pm.wakuPeerStore[ConnectionBook][peerId] = connectedness
- pm.wakuPeerStore[DirectionBook][peerId] = direction
+ peerStore[ConnectionBook][peerId] = connectedness
+ peerStore[DirectionBook][peerId] = direction
if not pm.storage.isNil:
- var remotePeerInfo = pm.wakuPeerStore.getPeer(peerId)
+ var remotePeerInfo = peerStore.getPeer(peerId)
if event.kind == PeerEventKind.Left:
remotePeerInfo.disconnectTime = getTime().toUnix
@@ -774,12 +795,12 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
proc logAndMetrics(pm: PeerManager) {.async.} =
heartbeat "Scheduling log and metrics run", LogAndMetricsInterval:
+ var peerStore = pm.switch.peerStore
# log metrics
let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec)
let maxConnections = pm.switch.connManager.inSema.size
- let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers().mapIt(
- RemotePeerInfo.init(it.peerId, it.addrs)
- )
+ let notConnectedPeers =
+ peerStore.getDisconnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs))
let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId))
let totalConnections = pm.switch.connManager.getConnections().len
@@ -791,7 +812,7 @@ proc logAndMetrics(pm: PeerManager) {.async.} =
outsideBackoffPeers = outsideBackoffPeers.len
# update prometheus metrics
- for proto in pm.wakuPeerStore.getWakuProtos():
+ for proto in peerStore.getWakuProtos():
let (protoConnsIn, protoConnsOut) = pm.connectedPeers(proto)
let (protoStreamsIn, protoStreamsOut) = pm.getNumStreams(proto)
waku_connected_peers.set(
@@ -815,6 +836,10 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
if pm.wakuMetadata.shards.len == 0:
return
+ if not pm.isOnline():
+ error "manageRelayPeers: won't attempt new connections - node is offline"
+ return
+
var peersToConnect: HashSet[PeerId] # Can't use RemotePeerInfo as they are ref objects
var peersToDisconnect: int
@@ -825,14 +850,16 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
let inTarget = pm.inRelayPeersTarget div pm.wakuMetadata.shards.len
let outTarget = pm.outRelayPeersTarget div pm.wakuMetadata.shards.len
+ var peerStore = pm.switch.peerStore
+
for shard in pm.wakuMetadata.shards.items:
# Filter out peer not on this shard
let connectedInPeers = inPeers.filterIt(
- pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
+ peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
)
let connectedOutPeers = outPeers.filterIt(
- pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
+ peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard))
)
# Calculate the difference between current values and targets
@@ -847,17 +874,17 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
# Get all peers for this shard
var connectablePeers =
- pm.wakuPeerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard))
+ peerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard))
let shardCount = connectablePeers.len
connectablePeers.keepItIf(
- not pm.wakuPeerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId)
+ not peerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId)
)
let connectableCount = connectablePeers.len
- connectablePeers.keepItIf(pm.wakuPeerStore.hasCapability(it.peerId, Relay))
+ connectablePeers.keepItIf(peerStore.hasCapability(it.peerId, Relay))
let relayCount = connectablePeers.len
@@ -881,7 +908,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
if peersToConnect.len == 0:
return
- let uniquePeers = toSeq(peersToConnect).mapIt(pm.wakuPeerStore.getPeer(it))
+ let uniquePeers = toSeq(peersToConnect).mapIt(peerStore.getPeer(it))
# Connect to all nodes
for i in countup(0, uniquePeers.len, MaxParallelDials):
@@ -890,8 +917,9 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
await pm.connectToNodes(uniquePeers[i ..< stop])
proc prunePeerStore*(pm: PeerManager) =
- let numPeers = pm.wakuPeerStore[AddressBook].book.len
- let capacity = pm.wakuPeerStore.getCapacity()
+ let peerStore = pm.switch.peerStore
+ let numPeers = peerStore[AddressBook].book.len
+ let capacity = peerStore.getCapacity()
if numPeers <= capacity:
return
@@ -900,7 +928,7 @@ proc prunePeerStore*(pm: PeerManager) =
var peersToPrune: HashSet[PeerId]
# prune failed connections
- for peerId, count in pm.wakuPeerStore[NumberFailedConnBook].book.pairs:
+ for peerId, count in peerStore[NumberFailedConnBook].book.pairs:
if count < pm.maxFailedAttempts:
continue
@@ -909,7 +937,7 @@ proc prunePeerStore*(pm: PeerManager) =
peersToPrune.incl(peerId)
- var notConnected = pm.wakuPeerStore.getDisconnectedPeers().mapIt(it.peerId)
+ var notConnected = peerStore.getDisconnectedPeers().mapIt(it.peerId)
# Always pick random non-connected peers
shuffle(notConnected)
@@ -918,11 +946,11 @@ proc prunePeerStore*(pm: PeerManager) =
var peersByShard = initTable[uint16, seq[PeerId]]()
for peer in notConnected:
- if not pm.wakuPeerStore[ENRBook].contains(peer):
+ if not peerStore[ENRBook].contains(peer):
shardlessPeers.add(peer)
continue
- let record = pm.wakuPeerStore[ENRBook][peer]
+ let record = peerStore[ENRBook][peer]
let rec = record.toTyped().valueOr:
shardlessPeers.add(peer)
@@ -956,9 +984,9 @@ proc prunePeerStore*(pm: PeerManager) =
peersToPrune.incl(peer)
for peer in peersToPrune:
- pm.wakuPeerStore.delete(peer)
+ peerStore.delete(peer)
- let afterNumPeers = pm.wakuPeerStore[AddressBook].book.len
+ let afterNumPeers = peerStore[AddressBook].book.len
trace "Finished pruning peer store",
beforeNumPeers = numPeers,
@@ -1041,6 +1069,7 @@ proc new*(
shardedPeerManagement = false,
reputationEnabled = false,
eligibilityEnabled = false,
+ dnsNameServers = newSeq[IpAddress](),
): PeerManager {.gcsafe.} =
let capacity = switch.peerStore.capacity
let maxConnections = switch.connManager.inSema.size
@@ -1081,7 +1110,6 @@ proc new*(
let pm = PeerManager(
switch: switch,
wakuMetadata: wakuMetadata,
- wakuPeerStore: createWakuPeerStore(switch.peerStore),
storage: storage,
initialBackoffInSec: initialBackoffInSec,
backoffFactor: backoffFactor,
@@ -1092,19 +1120,23 @@ proc new*(
maxFailedAttempts: maxFailedAttempts,
colocationLimit: colocationLimit,
shardedPeerManagement: shardedPeerManagement,
+ dnsNameServers: dnsNameServers,
+ online: true,
)
proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
onPeerEvent(pm, peerId, event)
+ var peerStore = pm.switch.peerStore
+
proc peerStoreChanged(peerId: PeerId) {.gcsafe.} =
- waku_peer_store_size.set(toSeq(pm.wakuPeerStore[AddressBook].book.keys).len.int64)
+ waku_peer_store_size.set(toSeq(peerStore[AddressBook].book.keys).len.int64)
pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Joined)
pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Left)
# called every time the peerstore is updated
- pm.wakuPeerStore[AddressBook].addHandler(peerStoreChanged)
+ peerStore[AddressBook].addHandler(peerStoreChanged)
pm.serviceSlots = initTable[string, RemotePeerInfo]()
pm.ipTable = initTable[string, seq[PeerId]]()
diff --git a/waku/node/peer_manager/waku_peer_store.nim b/waku/node/peer_manager/waku_peer_store.nim
index 027a1823f..ee339e858 100644
--- a/waku/node/peer_manager/waku_peer_store.nim
+++ b/waku/node/peer_manager/waku_peer_store.nim
@@ -3,6 +3,7 @@
import
std/[tables, sequtils, sets, options, strutils],
chronos,
+ chronicles,
eth/p2p/discoveryv5/enr,
libp2p/builders,
libp2p/peerstore
@@ -11,14 +12,12 @@ import
../../waku_core,
../../waku_enr/sharding,
../../waku_enr/capabilities,
- ../../common/utils/sequence
+ ../../common/utils/sequence,
+ ../../waku_core/peers
export peerstore, builders
type
- WakuPeerStore* = ref object
- peerStore: PeerStore
-
# Keeps track of the Connectedness state of a peer
ConnectionBook* = ref object of PeerBook[Connectedness]
@@ -40,137 +39,159 @@ type
# Keeps track of the ENR (Ethereum Node Record) of a peer
ENRBook* = ref object of PeerBook[enr.Record]
-# Constructor
-proc new*(T: type WakuPeerStore, identify: Identify, capacity = 1000): WakuPeerStore =
- let peerStore = PeerStore.new(identify, capacity)
- WakuPeerStore(peerStore: peerStore)
-
-proc createWakuPeerStore*(peerStore: PeerStore): WakuPeerStore =
- WakuPeerStore(peerStore: peerStore)
-
-# Core functionality
-proc `[]`*(wps: WakuPeerStore, T: typedesc): T =
- wps.peerStore[T]
-
-proc getPeer*(wps: WakuPeerStore, peerId: PeerId): RemotePeerInfo =
+proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo =
RemotePeerInfo(
peerId: peerId,
- addrs: wps[AddressBook][peerId],
+ addrs: peerStore[AddressBook][peerId],
enr:
- if wps[ENRBook][peerId] != default(enr.Record):
- some(wps[ENRBook][peerId])
+ if peerStore[ENRBook][peerId] != default(enr.Record):
+ some(peerStore[ENRBook][peerId])
else:
none(enr.Record),
- protocols: wps[ProtoBook][peerId],
- agent: wps[AgentBook][peerId],
- protoVersion: wps[ProtoVersionBook][peerId],
- publicKey: wps[KeyBook][peerId],
- connectedness: wps[ConnectionBook][peerId],
- disconnectTime: wps[DisconnectBook][peerId],
- origin: wps[SourceBook][peerId],
- direction: wps[DirectionBook][peerId],
- lastFailedConn: wps[LastFailedConnBook][peerId],
- numberFailedConn: wps[NumberFailedConnBook][peerId],
+ protocols: peerStore[ProtoBook][peerId],
+ agent: peerStore[AgentBook][peerId],
+ protoVersion: peerStore[ProtoVersionBook][peerId],
+ publicKey: peerStore[KeyBook][peerId],
+ connectedness: peerStore[ConnectionBook][peerId],
+ disconnectTime: peerStore[DisconnectBook][peerId],
+ origin: peerStore[SourceBook][peerId],
+ direction: peerStore[DirectionBook][peerId],
+ lastFailedConn: peerStore[LastFailedConnBook][peerId],
+ numberFailedConn: peerStore[NumberFailedConnBook][peerId],
)
-proc addPeer*(wps: WakuPeerStore, peer: RemotePeerInfo) =
- ## Only used in tests
- wps[AddressBook][peer.peerId] = peer.addrs
- wps[ProtoBook][peer.peerId] = peer.protocols
- wps[AgentBook][peer.peerId] = peer.agent
- wps[ProtoVersionBook][peer.peerId] = peer.protoVersion
- wps[KeyBook][peer.peerId] = peer.publicKey
- wps[ConnectionBook][peer.peerId] = peer.connectedness
- wps[DisconnectBook][peer.peerId] = peer.disconnectTime
- wps[SourceBook][peer.peerId] = peer.origin
- wps[DirectionBook][peer.peerId] = peer.direction
- wps[LastFailedConnBook][peer.peerId] = peer.lastFailedConn
- wps[NumberFailedConnBook][peer.peerId] = peer.numberFailedConn
- if peer.enr.isSome():
- wps[ENRBook][peer.peerId] = peer.enr.get()
-
-proc delete*(wps: WakuPeerStore, peerId: PeerId) =
+proc delete*(peerStore: PeerStore, peerId: PeerId) =
# Delete all the information of a given peer.
- wps.peerStore.del(peerId)
+ peerStore.del(peerId)
-# TODO: Rename peers() to getPeersByProtocol()
-proc peers*(wps: WakuPeerStore): seq[RemotePeerInfo] =
+proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] =
let allKeys = concat(
- toSeq(wps[AddressBook].book.keys()),
- toSeq(wps[ProtoBook].book.keys()),
- toSeq(wps[KeyBook].book.keys()),
+ toSeq(peerStore[AddressBook].book.keys()),
+ toSeq(peerStore[ProtoBook].book.keys()),
+ toSeq(peerStore[KeyBook].book.keys()),
)
.toHashSet()
- return allKeys.mapIt(wps.getPeer(it))
+ return allKeys.mapIt(peerStore.getPeer(it))
-proc peers*(wps: WakuPeerStore, proto: string): seq[RemotePeerInfo] =
- wps.peers().filterIt(it.protocols.contains(proto))
+proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin) =
+ ## Notice that the origin parameter is used to manually override the given peer origin.
+  ## At the time of writing, this is used in waku_discv5 or waku_node (peer exchange).
+ if peerStore[AddressBook][peer.peerId] == peer.addrs and
+ peerStore[KeyBook][peer.peerId] == peer.publicKey and
+ peerStore[ENRBook][peer.peerId].raw.len > 0:
+ let incomingEnr = peer.enr.valueOr:
+ trace "peer already managed and incoming ENR is empty",
+ remote_peer_id = $peer.peerId
+ return
-proc peers*(wps: WakuPeerStore, protocolMatcher: Matcher): seq[RemotePeerInfo] =
- wps.peers().filterIt(it.protocols.anyIt(protocolMatcher(it)))
+ if peerStore[ENRBook][peer.peerId].raw == incomingEnr.raw or
+ peerStore[ENRBook][peer.peerId].seqNum > incomingEnr.seqNum:
+ trace "peer already managed and ENR info is already saved",
+ remote_peer_id = $peer.peerId
+ return
-proc connectedness*(wps: WakuPeerStore, peerId: PeerId): Connectedness =
- wps[ConnectionBook].book.getOrDefault(peerId, NotConnected)
+ peerStore[AddressBook][peer.peerId] = peer.addrs
-proc hasShard*(wps: WakuPeerStore, peerId: PeerID, cluster, shard: uint16): bool =
- wps[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard)
+ var protos = peerStore[ProtoBook][peer.peerId]
+ for new_proto in peer.protocols:
+ ## append new discovered protocols to the current known protocols set
+ if not protos.contains(new_proto):
+ protos.add($new_proto)
+ peerStore[ProtoBook][peer.peerId] = protos
-proc hasCapability*(wps: WakuPeerStore, peerId: PeerID, cap: Capabilities): bool =
- wps[ENRBook].book.getOrDefault(peerId).supportsCapability(cap)
+  ## We don't care whether the item was already present in the table or not. Hence, we always discard the bool value returned by hasKeyOrPut
+ discard peerStore[AgentBook].book.hasKeyOrPut(peer.peerId, peer.agent)
+ discard peerStore[ProtoVersionBook].book.hasKeyOrPut(peer.peerId, peer.protoVersion)
+ discard peerStore[KeyBook].book.hasKeyOrPut(peer.peerId, peer.publicKey)
-proc peerExists*(wps: WakuPeerStore, peerId: PeerId): bool =
- wps[AddressBook].contains(peerId)
+ discard peerStore[ConnectionBook].book.hasKeyOrPut(peer.peerId, peer.connectedness)
+ discard peerStore[DisconnectBook].book.hasKeyOrPut(peer.peerId, peer.disconnectTime)
+ if origin != UnknownOrigin:
+ peerStore[SourceBook][peer.peerId] = origin
+ else:
+ discard peerStore[SourceBook].book.hasKeyOrPut(peer.peerId, peer.origin)
-proc isConnected*(wps: WakuPeerStore, peerId: PeerID): bool =
+ discard peerStore[DirectionBook].book.hasKeyOrPut(peer.peerId, peer.direction)
+ discard
+ peerStore[LastFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.lastFailedConn)
+ discard
+ peerStore[NumberFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.numberFailedConn)
+ if peer.enr.isSome():
+ peerStore[ENRBook][peer.peerId] = peer.enr.get()
+
+proc peers*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] =
+ peerStore.peers().filterIt(it.protocols.contains(proto))
+
+proc peers*(peerStore: PeerStore, protocolMatcher: Matcher): seq[RemotePeerInfo] =
+ peerStore.peers().filterIt(it.protocols.anyIt(protocolMatcher(it)))
+
+proc connectedness*(peerStore: PeerStore, peerId: PeerId): Connectedness =
+ peerStore[ConnectionBook].book.getOrDefault(peerId, NotConnected)
+
+proc hasShard*(peerStore: PeerStore, peerId: PeerID, cluster, shard: uint16): bool =
+ peerStore[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard)
+
+proc hasCapability*(peerStore: PeerStore, peerId: PeerID, cap: Capabilities): bool =
+ peerStore[ENRBook].book.getOrDefault(peerId).supportsCapability(cap)
+
+proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool =
+ peerStore[AddressBook].contains(peerId)
+
+proc isConnected*(peerStore: PeerStore, peerId: PeerID): bool =
# Returns `true` if the peer is connected
- wps.connectedness(peerId) == Connected
+ peerStore.connectedness(peerId) == Connected
-proc hasPeer*(wps: WakuPeerStore, peerId: PeerID, proto: string): bool =
+proc hasPeer*(peerStore: PeerStore, peerId: PeerID, proto: string): bool =
# Returns `true` if peer is included in manager for the specified protocol
- # TODO: What if peer does not exist in the wps?
- wps.getPeer(peerId).protocols.contains(proto)
+ # TODO: What if peer does not exist in the peerStore?
+ peerStore.getPeer(peerId).protocols.contains(proto)
-proc hasPeers*(wps: WakuPeerStore, proto: string): bool =
+proc hasPeers*(peerStore: PeerStore, proto: string): bool =
# Returns `true` if the peerstore has any peer for the specified protocol
- toSeq(wps[ProtoBook].book.values()).anyIt(it.anyIt(it == proto))
+ toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(it == proto))
-proc hasPeers*(wps: WakuPeerStore, protocolMatcher: Matcher): bool =
+proc hasPeers*(peerStore: PeerStore, protocolMatcher: Matcher): bool =
# Returns `true` if the peerstore has any peer matching the protocolMatcher
- toSeq(wps[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it)))
+ toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it)))
-proc getCapacity*(wps: WakuPeerStore): int =
- wps.peerStore.capacity
+proc getCapacity*(peerStore: PeerStore): int =
+ peerStore.capacity
-proc setCapacity*(wps: WakuPeerStore, capacity: int) =
- wps.peerStore.capacity = capacity
+proc setCapacity*(peerStore: PeerStore, capacity: int) =
+ peerStore.capacity = capacity
-proc getWakuProtos*(wps: WakuPeerStore): seq[string] =
- toSeq(wps[ProtoBook].book.values()).flatten().deduplicate().filterIt(
+proc getWakuProtos*(peerStore: PeerStore): seq[string] =
+ toSeq(peerStore[ProtoBook].book.values()).flatten().deduplicate().filterIt(
it.startsWith("/vac/waku")
)
proc getPeersByDirection*(
- wps: WakuPeerStore, direction: PeerDirection
+ peerStore: PeerStore, direction: PeerDirection
): seq[RemotePeerInfo] =
- return wps.peers.filterIt(it.direction == direction)
+ return peerStore.peers.filterIt(it.direction == direction)
-proc getDisconnectedPeers*(wps: WakuPeerStore): seq[RemotePeerInfo] =
- return wps.peers.filterIt(it.connectedness != Connected)
+proc getDisconnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] =
+ return peerStore.peers.filterIt(it.connectedness != Connected)
-proc getConnectedPeers*(wps: WakuPeerStore): seq[RemotePeerInfo] =
- return wps.peers.filterIt(it.connectedness == Connected)
+proc getConnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] =
+ return peerStore.peers.filterIt(it.connectedness == Connected)
-proc getPeersByProtocol*(wps: WakuPeerStore, proto: string): seq[RemotePeerInfo] =
- return wps.peers.filterIt(it.protocols.contains(proto))
+proc getPeersByProtocol*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] =
+ return peerStore.peers.filterIt(it.protocols.contains(proto))
-proc getReachablePeers*(wps: WakuPeerStore): seq[RemotePeerInfo] =
+proc getReachablePeers*(peerStore: PeerStore): seq[RemotePeerInfo] =
+ return peerStore.peers.filterIt(it.connectedness != CannotConnect)
+
+proc getPeersByShard*(
+ peerStore: PeerStore, cluster, shard: uint16
+): seq[RemotePeerInfo] =
+ return peerStore.peers.filterIt(
+ it.enr.isSome() and it.enr.get().containsShard(cluster, shard)
+ )
+
+proc getPeersByCapability*(
+ peerStore: PeerStore, cap: Capabilities
+): seq[RemotePeerInfo] =
return
- wps.peers.filterIt(it.connectedness == CanConnect or it.connectedness == Connected)
-
-proc getPeersByShard*(wps: WakuPeerStore, cluster, shard: uint16): seq[RemotePeerInfo] =
- return
- wps.peers.filterIt(it.enr.isSome() and it.enr.get().containsShard(cluster, shard))
-
-proc getPeersByCapability*(wps: WakuPeerStore, cap: Capabilities): seq[RemotePeerInfo] =
- return wps.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
+ peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
diff --git a/waku/node/waku_metrics.nim b/waku/node/waku_metrics.nim
index c349f0849..ba61f6ef8 100644
--- a/waku/node/waku_metrics.nim
+++ b/waku/node/waku_metrics.nim
@@ -5,14 +5,18 @@ import
../waku_rln_relay/protocol_metrics as rln_metrics,
../utils/collector,
./peer_manager,
- ./waku_node,
- ../factory/external_config
+ ./waku_node
const LogInterval = 10.minutes
logScope:
topics = "waku node metrics"
+type MetricsServerConf* = object
+ httpAddress*: IpAddress
+ httpPort*: Port
+ logging*: bool
+
proc startMetricsLog*() =
var logMetrics: CallbackFunc
@@ -70,17 +74,15 @@ proc startMetricsServer(
return ok(server)
proc startMetricsServerAndLogging*(
- conf: WakuNodeConf
+ conf: MetricsServerConf, portsShift: uint16
): Result[MetricsHttpServerRef, string] =
var metricsServer: MetricsHttpServerRef
- if conf.metricsServer:
- metricsServer = startMetricsServer(
- conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
- ).valueOr:
- return
- err("Starting metrics server failed. Continuing in current state:" & $error)
+ metricsServer = startMetricsServer(
+ conf.httpAddress, Port(conf.httpPort.uint16 + portsShift)
+ ).valueOr:
+ return err("Starting metrics server failed. Continuing in current state:" & $error)
- if conf.metricsLogging:
+ if conf.logging:
startMetricsLog()
return ok(metricsServer)
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 8807067ff..76e8b853b 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -15,8 +15,6 @@ import
libp2p/protocols/ping,
libp2p/protocols/pubsub/gossipsub,
libp2p/protocols/pubsub/rpc/messages,
- libp2p/protocols/connectivity/autonat/client,
- libp2p/protocols/connectivity/autonat/service,
libp2p/builders,
libp2p/transports/transport,
libp2p/transports/tcptransport,
@@ -48,10 +46,9 @@ import
../waku_enr,
../waku_peer_exchange,
../waku_rln_relay,
- ./config,
+ ./net_config,
./peer_manager,
../common/rate_limit/setting,
- ../discovery/autonat_service,
../incentivization/[eligibility_manager, rpc]
declarePublicCounter waku_node_messages, "number of messages received", ["type"]
@@ -211,12 +208,12 @@ proc mountSharding*(
proc mountStoreSync*(
node: WakuNode,
- storeSyncRange = 3600,
- storeSyncInterval = 300,
- storeSyncRelayJitter = 20,
+ storeSyncRange = 3600.uint32,
+ storeSyncInterval = 300.uint32,
+ storeSyncRelayJitter = 20.uint32,
): Future[Result[void, string]] {.async.} =
let idsChannel = newAsyncQueue[SyncID](0)
- let wantsChannel = newAsyncQueue[(PeerId, WakuMessageHash)](0)
+ let wantsChannel = newAsyncQueue[PeerId](0)
let needsChannel = newAsyncQueue[(PeerId, WakuMessageHash)](0)
var cluster: uint16
@@ -260,7 +257,7 @@ proc mountStoreSync*(
## Waku relay
-proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) =
+proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) =
if node.wakuRelay.isSubscribed(topic):
return
@@ -305,30 +302,34 @@ proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) =
proc subscribe*(
node: WakuNode, subscription: SubscriptionEvent, handler = none(WakuRelayHandler)
-) =
+): Result[void, string] =
## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on
## this topic. WakuRelayHandler is a method that takes a topic and a Waku message.
if node.wakuRelay.isNil():
error "Invalid API call to `subscribe`. WakuRelay not mounted."
- return
+ return err("Invalid API call to `subscribe`. WakuRelay not mounted.")
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentSub:
let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
error "Autosharding error", error = error
- return
+ return err("Autosharding error: " & error)
($shard, some(subscription.topic))
of PubsubSub:
(subscription.topic, none(ContentTopic))
else:
- return
+ return err("Unsupported subscription type in relay subscribe")
+
+ if node.wakuRelay.isSubscribed(pubsubTopic):
+ debug "already subscribed to topic", pubsubTopic
+ return err("Already subscribed to topic: " & $pubsubTopic)
if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()):
error "Invalid API call to `subscribe`. Was already subscribed"
- return
+ return err("Invalid API call to `subscribe`. Was already subscribed")
node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic))
node.registerRelayDefaultHandler(pubsubTopic)
@@ -339,43 +340,49 @@ proc subscribe*(
if contentTopicOp.isSome():
node.contentTopicHandlers[contentTopicOp.get()] = wrappedHandler
-proc unsubscribe*(node: WakuNode, subscription: SubscriptionEvent) =
+ return ok()
+
+proc unsubscribe*(
+ node: WakuNode, subscription: SubscriptionEvent
+): Result[void, string] =
## Unsubscribes from a specific PubSub or Content topic.
if node.wakuRelay.isNil():
error "Invalid API call to `unsubscribe`. WakuRelay not mounted."
- return
+ return err("Invalid API call to `unsubscribe`. WakuRelay not mounted.")
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentUnsub:
let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
error "Autosharding error", error = error
- return
+ return err("Autosharding error: " & error)
($shard, some(subscription.topic))
of PubsubUnsub:
(subscription.topic, none(ContentTopic))
else:
- return
+ return err("Unsupported subscription type in relay unsubscribe")
if not node.wakuRelay.isSubscribed(pubsubTopic):
- error "Invalid API call to `unsubscribe`. Was not subscribed"
+ error "Invalid API call to `unsubscribe`. Was not subscribed", pubsubTopic
return
+ err("Invalid API call to `unsubscribe`. Was not subscribed to: " & $pubsubTopic)
if contentTopicOp.isSome():
# Remove this handler only
var handler: TopicHandler
+ ## TODO: refactor this part. I think we can simplify it
if node.contentTopicHandlers.pop(contentTopicOp.get(), handler):
debug "unsubscribe", contentTopic = contentTopicOp.get()
- node.wakuRelay.unsubscribe(pubsubTopic, handler)
-
- if contentTopicOp.isNone() or node.wakuRelay.topics.getOrDefault(pubsubTopic).len == 1:
- # Remove all handlers
+ node.wakuRelay.unsubscribe(pubsubTopic)
+ else:
debug "unsubscribe", pubsubTopic = pubsubTopic
- node.wakuRelay.unsubscribeAll(pubsubTopic)
+ node.wakuRelay.unsubscribe(pubsubTopic)
node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic))
+ return ok()
+
proc publish*(
node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage
): Future[Result[void, string]] {.async, gcsafe.} =
@@ -418,7 +425,7 @@ proc startRelay*(node: WakuNode) {.async.} =
## Setup relay protocol
# Resume previous relay connections
- if node.peerManager.wakuPeerStore.hasPeers(protocolMatcher(WakuRelayCodec)):
+ if node.peerManager.switch.peerStore.hasPeers(protocolMatcher(WakuRelayCodec)):
info "Found previous WakuRelay peers. Reconnecting."
# Reconnect to previous relay peers. This will respect a backoff period, if necessary
@@ -437,20 +444,17 @@ proc mountRelay*(
shards: seq[RelayShard] = @[],
peerExchangeHandler = none(RoutingRecordsHandler),
maxMessageSize = int(DefaultMaxWakuMessageSize),
-) {.async, gcsafe.} =
+): Future[Result[void, string]] {.async.} =
if not node.wakuRelay.isNil():
error "wakuRelay already mounted, skipping"
- return
+ return err("wakuRelay already mounted, skipping")
## The default relay topics is the union of all configured topics plus default PubsubTopic(s)
info "mounting relay protocol"
- let initRes = WakuRelay.new(node.switch, maxMessageSize)
- if initRes.isErr():
- error "failed mounting relay protocol", error = initRes.error
- return
-
- node.wakuRelay = initRes.value
+ node.wakuRelay = WakuRelay.new(node.switch, maxMessageSize).valueOr:
+ error "failed mounting relay protocol", error = error
+ return err("failed mounting relay protocol: " & error)
## Add peer exchange handler
if peerExchangeHandler.isSome():
@@ -463,11 +467,17 @@ proc mountRelay*(
node.switch.mount(node.wakuRelay, protocolMatcher(WakuRelayCodec))
- info "relay mounted successfully", shards = shards
+ ## Make sure we don't have duplicates
+ let uniqueShards = deduplicate(shards)
# Subscribe to shards
- for shard in shards:
- node.subscribe((kind: PubsubSub, topic: $shard))
+ for shard in uniqueShards:
+ node.subscribe((kind: PubsubSub, topic: $shard)).isOkOr:
+ error "failed to subscribe to shard", error = error
+ return err("failed to subscribe to shard in mountRelay: " & error)
+
+ info "relay mounted successfully", shards = uniqueShards
+ return ok()
## Waku filter
@@ -517,6 +527,10 @@ proc mountFilterClient*(node: WakuNode) {.async: (raises: []).} =
## rely on node provided cache. - This only applies for v2 filter client
info "mounting filter client"
+ if not node.wakuFilterClient.isNil():
+ trace "Filter client already mounted."
+ return
+
node.wakuFilterClient = WakuFilterClient.new(node.peerManager, node.rng)
try:
@@ -1025,8 +1039,9 @@ proc mountLegacyLightPush*(
proc mountLegacyLightPushClient*(node: WakuNode) =
info "mounting legacy light push client"
- node.wakuLegacyLightpushClient =
- WakuLegacyLightPushClient.new(node.peerManager, node.rng)
+ if node.wakuLegacyLightpushClient.isNil():
+ node.wakuLegacyLightpushClient =
+ WakuLegacyLightPushClient.new(node.peerManager, node.rng)
proc legacyLightpushPublish*(
node: WakuNode,
@@ -1137,7 +1152,8 @@ proc mountLightPush*(
proc mountLightPushClient*(node: WakuNode) =
info "mounting light push client"
- node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng)
+ if node.wakuLightpushClient.isNil():
+ node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng)
proc lightpushPublishHandler(
node: WakuNode,
@@ -1222,7 +1238,7 @@ proc mountRlnRelay*(
raise
newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
let rlnRelay = rlnRelayRes.get()
- if (rlnConf.rlnRelayUserMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
+ if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract"
let validator = generateRlnValidator(rlnRelay, spamHandler)
@@ -1267,7 +1283,7 @@ proc fetchPeerExchangePeers*(
)
)
- info "Retrieving peer info via peer exchange protocol"
+ info "Retrieving peer info via peer exchange protocol", amount
let pxPeersRes = await node.wakuPeerExchange.request(amount)
if pxPeersRes.isOk:
var validPeers = 0
diff --git a/waku/utils/collector.nim b/waku/utils/collector.nim
index de6411ae3..3586a2d6a 100644
--- a/waku/utils/collector.nim
+++ b/waku/utils/collector.nim
@@ -22,9 +22,19 @@ template parseAndAccumulate*(collector: Collector, cumulativeValue: float64): fl
cumulativeValue = total
freshCount
+template parseAndAccumulate*(
+ collector: typedesc[IgnoredCollector], cumulativeValue: float64
+): float64 =
+ ## Used when metrics are disabled (undefined `metrics` compilation flag)
+ 0.0
+
template collectorAsF64*(collector: Collector): float64 =
## This template is used to get metrics from 0
## Serves as a wrapper for parseCollectorIntoF64 which is gcsafe
{.gcsafe.}:
let total = parseCollectorIntoF64(collector)
total
+
+template collectorAsF64*(collector: typedesc[IgnoredCollector]): float64 =
+ ## Used when metrics are disabled (undefined `metrics` compilation flag)
+ 0.0
diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim
index 1fd9fdfc8..7d45544e2 100644
--- a/waku/waku_api/rest/admin/client.nim
+++ b/waku/waku_api/rest/admin/client.nim
@@ -1,13 +1,8 @@
{.push raises: [].}
-import
- chronicles,
- json_serialization,
- json_serialization/std/options,
- presto/[route, client],
- stew/byteutils
+import chronicles, json_serialization, presto/[route, client], stew/byteutils
-import ../serdes, ../responses, ../rest_serdes, ./types
+import ../serdes, ../rest_serdes, ./types
export types
@@ -27,6 +22,46 @@ proc postPeers*(
rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodPost
.}
+proc getPeerById*(
+ peerId: string
+): RestResponse[WakuPeer] {.
+ rest, endpoint: "/admin/v1/peer/{peerId}", meth: HttpMethod.MethodGet
+.}
+
+proc getServicePeers*(): RestResponse[seq[WakuPeer]] {.
+ rest, endpoint: "/admin/v1/peers/service", meth: HttpMethod.MethodGet
+.}
+
+proc getConnectedPeers*(): RestResponse[seq[WakuPeer]] {.
+ rest, endpoint: "/admin/v1/peers/connected", meth: HttpMethod.MethodGet
+.}
+
+proc getConnectedPeersByShard*(
+ shardId: uint16
+): RestResponse[seq[WakuPeer]] {.
+ rest, endpoint: "/admin/v1/peers/connected/on/{shardId}", meth: HttpMethod.MethodGet
+.}
+
+proc getRelayPeers*(): RestResponse[PeersOfShards] {.
+ rest, endpoint: "/admin/v1/peers/relay", meth: HttpMethod.MethodGet
+.}
+
+proc getRelayPeersByShard*(
+ shardId: uint16
+): RestResponse[PeersOfShard] {.
+ rest, endpoint: "/admin/v1/peers/relay/on/{shardId}", meth: HttpMethod.MethodGet
+.}
+
+proc getMeshPeers*(): RestResponse[PeersOfShards] {.
+ rest, endpoint: "/admin/v1/peers/mesh", meth: HttpMethod.MethodGet
+.}
+
+proc getMeshPeersByShard*(
+ shardId: uint16
+): RestResponse[PeersOfShard] {.
+ rest, endpoint: "/admin/v1/peers/mesh/on/{shardId}", meth: HttpMethod.MethodGet
+.}
+
proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {.
rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet
.}
diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim
index c140c46d6..9cf6ec131 100644
--- a/waku/waku_api/rest/admin/handlers.nim
+++ b/waku/waku_api/rest/admin/handlers.nim
@@ -1,22 +1,26 @@
{.push raises: [].}
import
- std/[strformat, sequtils, tables],
+ std/[sets, strformat, sequtils, tables],
chronicles,
json_serialization,
presto/route,
- libp2p/[peerinfo, switch]
+ libp2p/[peerinfo, switch, peerid, protocols/pubsub/pubsubpeer]
import
- ../../../waku_core,
- ../../../waku_store_legacy/common,
- ../../../waku_store/common,
- ../../../waku_filter_v2,
- ../../../waku_lightpush_legacy/common,
- ../../../waku_relay,
- ../../../waku_peer_exchange,
- ../../../waku_node,
- ../../../node/peer_manager,
+ waku/[
+ waku_core,
+ waku_core/topics/pubsub_topic,
+ waku_store_legacy/common,
+ waku_store/common,
+ waku_filter_v2,
+ waku_lightpush_legacy/common,
+ waku_relay,
+ waku_peer_exchange,
+ waku_node,
+ node/peer_manager,
+ waku_enr/sharding,
+ ],
../responses,
../serdes,
../rest_serdes,
@@ -27,104 +31,263 @@ export types
logScope:
topics = "waku node rest admin api"
-const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers"
+const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers
+const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}"
+
+const ROUTE_ADMIN_V1_SERVICE_PEERS* = "/admin/v1/peers/service" # returns all peers
+
+const ROUTE_ADMIN_V1_CONNECTED_PEERS* = "/admin/v1/peers/connected"
+const ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD* =
+ "/admin/v1/peers/connected/on/{shardId}"
+const ROUTE_ADMIN_V1_RELAY_PEERS* = "/admin/v1/peers/relay"
+const ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD* = "/admin/v1/peers/relay/on/{shardId}"
+const ROUTE_ADMIN_V1_MESH_PEERS* = "/admin/v1/peers/mesh"
+const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}"
+
const ROUTE_ADMIN_V1_FILTER_SUBS* = "/admin/v1/filter/subscriptions"
type PeerProtocolTuple =
- tuple[multiaddr: string, protocol: string, connected: bool, origin: PeerOrigin]
+ tuple[
+ multiaddr: string,
+ protocol: string,
+ shards: seq[uint16],
+ connected: Connectedness,
+ agent: string,
+ origin: PeerOrigin,
+ ]
proc tuplesToWakuPeers(peers: var WakuPeers, peersTup: seq[PeerProtocolTuple]) =
for peer in peersTup:
- peers.add(peer.multiaddr, peer.protocol, peer.connected, peer.origin)
+ peers.add(
+ peer.multiaddr, peer.protocol, peer.shards, peer.connected, peer.agent,
+ peer.origin,
+ )
+
+proc populateAdminPeerInfo(
+ peers: var WakuPeers, node: WakuNode, codec: Option[string] = none[string]()
+) =
+ if codec.isNone():
+ peers = node.peerManager.switch.peerStore.peers().mapIt(WakuPeer.init(it))
+ else:
+ let peersTuples = node.peerManager.switch.peerStore.peers(codec.get()).mapIt(
+ (
+ multiaddr: constructMultiaddrStr(it),
+ protocol: codec.get(),
+ shards: it.getShards(),
+ connected: it.connectedness,
+ agent: it.agent,
+ origin: it.origin,
+ )
+ )
+ tuplesToWakuPeers(peers, peersTuples)
+
+proc populateAdminPeerInfoForAll(node: WakuNode): WakuPeers =
+ var peers: WakuPeers = @[]
+ populateAdminPeerInfo(peers, node)
+ return peers
+
+proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPeers =
+ var peers: WakuPeers = @[]
+
+ for codec in codecs:
+ populateAdminPeerInfo(peers, node, some(codec))
+
+ return peers
proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse:
- var peers: WakuPeers = @[]
+ let peers = populateAdminPeerInfoForAll(node)
- let relayPeers = node.peerManager.wakuPeerStore.peers(WakuRelayCodec).mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuRelayCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, relayPeers)
-
- let filterV2Peers = node.peerManager.wakuPeerStore
- .peers(WakuFilterSubscribeCodec)
- .mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuFilterSubscribeCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, filterV2Peers)
-
- let storePeers = node.peerManager.wakuPeerStore.peers(WakuStoreCodec).mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuStoreCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, storePeers)
-
- let legacyStorePeers = node.peerManager.wakuPeerStore
- .peers(WakuLegacyStoreCodec)
- .mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuLegacyStoreCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, legacyStorePeers)
-
- let legacyLightpushPeers = node.peerManager.wakuPeerStore
- .peers(WakuLegacyLightPushCodec)
- .mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuLegacyLightPushCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, legacyLightpushPeers)
-
- let lightpushPeers = node.peerManager.wakuPeerStore.peers(WakuLightPushCodec).mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuLightPushCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, lightpushPeers)
-
- let pxPeers = node.peerManager.wakuPeerStore.peers(WakuPeerExchangeCodec).mapIt(
- (
- multiaddr: constructMultiaddrStr(it),
- protocol: WakuPeerExchangeCodec,
- connected: it.connectedness == Connectedness.Connected,
- origin: it.origin,
- )
- )
- tuplesToWakuPeers(peers, pxPeers)
-
- let resp = RestApiResponse.jsonResponse(peers, status = Http200)
- if resp.isErr():
- error "An error ocurred while building the json respose: ", error = resp.error
+ let resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
return RestApiResponse.internalServerError(
- fmt("An error ocurred while building the json respose: {resp.error}")
+ fmt("An error occurred while building the json response: {error}")
)
- return resp.get()
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_SINGLE_PEER) do(
+ peerId: string
+ ) -> RestApiResponse:
+ let peerIdString = peerId.valueOr:
+ return RestApiResponse.badRequest("Invalid argument:" & $error)
+
+ let peerIdVal: PeerId = PeerId.init(peerIdString).valueOr:
+ return RestApiResponse.badRequest("Invalid argument:" & $error)
+
+ if node.peerManager.switch.peerStore.peerExists(peerIdVal):
+ let peerInfo = node.peerManager.switch.peerStore.getPeer(peerIdVal)
+ let peer = WakuPeer.init(peerInfo)
+ let resp = RestApiResponse.jsonResponse(peer, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+ else:
+ return RestApiResponse.notFound(fmt("Peer with ID {peerId} not found"))
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_SERVICE_PEERS) do() -> RestApiResponse:
+ let peers = populateAdminPeerInfoForCodecs(
+ node,
+ @[
+ WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec,
+ WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec,
+ WakuReconciliationCodec,
+ ],
+ )
+
+ let resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS) do() -> RestApiResponse:
+ let allPeers = populateAdminPeerInfoForAll(node)
+
+ let connectedPeers = allPeers.filterIt(it.connected == Connectedness.Connected)
+
+ let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD) do(
+ shardId: uint16
+ ) -> RestApiResponse:
+ let shard = shardId.valueOr:
+ return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))
+
+ let allPeers = populateAdminPeerInfoForAll(node)
+
+ let connectedPeers = allPeers.filterIt(
+ it.connected == Connectedness.Connected and it.shards.contains(shard)
+ )
+
+ let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS) do() -> RestApiResponse:
+ if node.wakuRelay.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Relay Protocol is not mounted to the node"
+ )
+
+ var relayPeers: PeersOfShards = @[]
+ for topic in node.wakuRelay.getSubscribedTopics():
+ let relayShard = RelayShard.parse(topic).valueOr:
+ error "Invalid subscribed topic", error = error, topic = topic
+ continue
+ let pubsubPeers =
+ node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
+ relayPeers.add(
+ PeersOfShard(
+ shard: relayShard.shardId,
+ peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)),
+ )
+ )
+
+ let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD) do(
+ shardId: uint16
+ ) -> RestApiResponse:
+ let shard = shardId.valueOr:
+ return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))
+
+ if node.wakuRelay.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Relay Protocol is not mounted to the node"
+ )
+
+ let topic =
+ toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
+ let pubsubPeers =
+ node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
+ let relayPeer = PeersOfShard(
+ shard: shard, peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager))
+ )
+
+ let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS) do() -> RestApiResponse:
+ if node.wakuRelay.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Relay Protocol is not mounted to the node"
+ )
+
+ var relayPeers: PeersOfShards = @[]
+ for topic in node.wakuRelay.getSubscribedTopics():
+ let relayShard = RelayShard.parse(topic).valueOr:
+ error "Invalid subscribed topic", error = error, topic = topic
+ continue
+ let peers =
+ node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
+ relayPeers.add(
+ PeersOfShard(
+ shard: relayShard.shardId,
+ peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)),
+ )
+ )
+
+ let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
+ router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD) do(
+ shardId: uint16
+ ) -> RestApiResponse:
+ let shard = shardId.valueOr:
+ return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))
+
+ if node.wakuRelay.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Relay Protocol is not mounted to the node"
+ )
+
+ let topic =
+ toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
+ let peers =
+ node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
+ let relayPeer = PeersOfShard(
+ shard: shard, peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager))
+ )
+
+ let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) =
router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do(
diff --git a/waku/waku_api/rest/admin/types.nim b/waku/waku_api/rest/admin/types.nim
index bb7dd2b0c..0c0786e3d 100644
--- a/waku/waku_api/rest/admin/types.nim
+++ b/waku/waku_api/rest/admin/types.nim
@@ -4,22 +4,29 @@ import
chronicles,
json_serialization,
json_serialization/std/options,
- json_serialization/lexer
-import ../serdes, ../../../waku_core
+ json_serialization/lexer,
+ results,
+ libp2p/protocols/pubsub/pubsubpeer
+import waku/[waku_core, node/peer_manager], ../serdes
#### Types
-
-type ProtocolState* = object
- protocol*: string
- connected*: bool
-
type WakuPeer* = object
multiaddr*: string
- protocols*: seq[ProtocolState]
+ protocols*: seq[string]
+ shards*: seq[uint16]
+ connected*: Connectedness
+ agent*: string
origin*: PeerOrigin
+ score*: Option[float64]
type WakuPeers* = seq[WakuPeer]
+type PeersOfShard* = object
+ shard*: uint16
+ peers*: WakuPeers
+
+type PeersOfShards* = seq[PeersOfShard]
+
type FilterTopic* = object
pubsubTopic*: string
contentTopic*: string
@@ -29,22 +36,25 @@ type FilterSubscription* = object
filterCriteria*: seq[FilterTopic]
#### Serialization and deserialization
-
-proc writeValue*(
- writer: var JsonWriter[RestJson], value: ProtocolState
-) {.raises: [IOError].} =
- writer.beginRecord()
- writer.writeField("protocol", value.protocol)
- writer.writeField("connected", value.connected)
- writer.endRecord()
-
proc writeValue*(
writer: var JsonWriter[RestJson], value: WakuPeer
) {.raises: [IOError].} =
writer.beginRecord()
writer.writeField("multiaddr", value.multiaddr)
writer.writeField("protocols", value.protocols)
+ writer.writeField("shards", value.shards)
+ writer.writeField("connected", value.connected)
+ writer.writeField("agent", value.agent)
writer.writeField("origin", value.origin)
+ writer.writeField("score", value.score)
+ writer.endRecord()
+
+proc writeValue*(
+ writer: var JsonWriter[RestJson], value: PeersOfShard
+) {.raises: [IOError].} =
+ writer.beginRecord()
+ writer.writeField("shard", value.shard)
+ writer.writeField("peers", value.peers)
writer.endRecord()
proc writeValue*(
@@ -63,43 +73,17 @@ proc writeValue*(
writer.writeField("filterCriteria", value.filterCriteria)
writer.endRecord()
-proc readValue*(
- reader: var JsonReader[RestJson], value: var ProtocolState
-) {.gcsafe, raises: [SerializationError, IOError].} =
- var
- protocol: Option[string]
- connected: Option[bool]
-
- for fieldName in readObjectFields(reader):
- case fieldName
- of "protocol":
- if protocol.isSome():
- reader.raiseUnexpectedField("Multiple `protocol` fields found", "ProtocolState")
- protocol = some(reader.readValue(string))
- of "connected":
- if connected.isSome():
- reader.raiseUnexpectedField(
- "Multiple `connected` fields found", "ProtocolState"
- )
- connected = some(reader.readValue(bool))
- else:
- unrecognizedFieldWarning(value)
-
- if connected.isNone():
- reader.raiseUnexpectedValue("Field `connected` is missing")
-
- if protocol.isNone():
- reader.raiseUnexpectedValue("Field `protocol` is missing")
-
- value = ProtocolState(protocol: protocol.get(), connected: connected.get())
-
proc readValue*(
reader: var JsonReader[RestJson], value: var WakuPeer
) {.gcsafe, raises: [SerializationError, IOError].} =
var
multiaddr: Option[string]
- protocols: Option[seq[ProtocolState]]
+ protocols: Option[seq[string]]
+ shards: Option[seq[uint16]]
+ connected: Option[Connectedness]
+ agent: Option[string]
origin: Option[PeerOrigin]
+ score: Option[float64]
for fieldName in readObjectFields(reader):
case fieldName
@@ -110,11 +94,27 @@ proc readValue*(
of "protocols":
if protocols.isSome():
reader.raiseUnexpectedField("Multiple `protocols` fields found", "WakuPeer")
- protocols = some(reader.readValue(seq[ProtocolState]))
+ protocols = some(reader.readValue(seq[string]))
+ of "shards":
+ if shards.isSome():
+ reader.raiseUnexpectedField("Multiple `shards` fields found", "WakuPeer")
+ shards = some(reader.readValue(seq[uint16]))
+ of "connected":
+ if connected.isSome():
+ reader.raiseUnexpectedField("Multiple `connected` fields found", "WakuPeer")
+ connected = some(reader.readValue(Connectedness))
+ of "agent":
+ if agent.isSome():
+ reader.raiseUnexpectedField("Multiple `agent` fields found", "WakuPeer")
+ agent = some(reader.readValue(string))
of "origin":
if origin.isSome():
reader.raiseUnexpectedField("Multiple `origin` fields found", "WakuPeer")
origin = some(reader.readValue(PeerOrigin))
+ of "score":
+ if score.isSome():
+ reader.raiseUnexpectedField("Multiple `score` fields found", "WakuPeer")
+ score = some(reader.readValue(float64))
else:
unrecognizedFieldWarning(value)
@@ -124,13 +124,56 @@ proc readValue*(
if protocols.isNone():
reader.raiseUnexpectedValue("Field `protocols` are missing")
+ if shards.isNone():
+ reader.raiseUnexpectedValue("Field `shards` is missing")
+
+ if connected.isNone():
+ reader.raiseUnexpectedValue("Field `connected` is missing")
+
+ if agent.isNone():
+ reader.raiseUnexpectedValue("Field `agent` is missing")
+
if origin.isNone():
reader.raiseUnexpectedValue("Field `origin` is missing")
value = WakuPeer(
- multiaddr: multiaddr.get(), protocols: protocols.get(), origin: origin.get()
+ multiaddr: multiaddr.get(),
+ protocols: protocols.get(),
+ shards: shards.get(),
+ connected: connected.get(),
+ agent: agent.get(),
+ origin: origin.get(),
+ score: score,
)
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var PeersOfShard
+) {.gcsafe, raises: [SerializationError, IOError].} =
+ var
+ shard: Option[uint16]
+ peers: Option[WakuPeers]
+
+ for fieldName in readObjectFields(reader):
+ case fieldName
+ of "shard":
+ if shard.isSome():
+ reader.raiseUnexpectedField("Multiple `shard` fields found", "PeersOfShard")
+ shard = some(reader.readValue(uint16))
+ of "peers":
+ if peers.isSome():
+ reader.raiseUnexpectedField("Multiple `peers` fields found", "PeersOfShard")
+ peers = some(reader.readValue(WakuPeers))
+ else:
+ unrecognizedFieldWarning(value)
+
+ if shard.isNone():
+ reader.raiseUnexpectedValue("Field `shard` is missing")
+
+ if peers.isNone():
+ reader.raiseUnexpectedValue("Field `peers` are missing")
+
+ value = PeersOfShard(shard: shard.get(), peers: peers.get())
+
proc readValue*(
reader: var JsonReader[RestJson], value: var FilterTopic
) {.gcsafe, raises: [SerializationError, IOError].} =
@@ -195,26 +238,47 @@ proc readValue*(
value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get())
-## Utility for populating WakuPeers and ProtocolState
-func `==`*(a, b: ProtocolState): bool {.inline.} =
- return a.protocol == b.protocol
-
-func `==`*(a: ProtocolState, b: string): bool {.inline.} =
- return a.protocol == b
-
func `==`*(a, b: WakuPeer): bool {.inline.} =
return a.multiaddr == b.multiaddr
+proc init*(T: type WakuPeer, peerInfo: RemotePeerInfo): WakuPeer =
+ result = WakuPeer(
+ multiaddr: constructMultiaddrStr(peerInfo),
+ protocols: peerInfo.protocols,
+ shards: peerInfo.getShards(),
+ connected: peerInfo.connectedness,
+ agent: peerInfo.agent,
+ origin: peerInfo.origin,
+ score: none(float64),
+ )
+
+proc init*(T: type WakuPeer, pubsubPeer: PubSubPeer, pm: PeerManager): WakuPeer =
+ let peerInfo = pm.getPeer(pubsubPeer.peerId)
+ result = WakuPeer(
+ multiaddr: constructMultiaddrStr(peerInfo),
+ protocols: peerInfo.protocols,
+ shards: peerInfo.getShards(),
+ connected: peerInfo.connectedness,
+ agent: peerInfo.agent,
+ origin: peerInfo.origin,
+ score: some(pubsubPeer.score),
+ )
+
proc add*(
peers: var WakuPeers,
multiaddr: string,
protocol: string,
- connected: bool,
+ shards: seq[uint16],
+ connected: Connectedness,
+ agent: string,
origin: PeerOrigin,
) =
var peer: WakuPeer = WakuPeer(
multiaddr: multiaddr,
- protocols: @[ProtocolState(protocol: protocol, connected: connected)],
+ protocols: @[protocol],
+ shards: shards,
+ connected: connected,
+ agent: agent,
origin: origin,
)
let idx = peers.find(peer)
@@ -222,4 +286,4 @@ proc add*(
if idx < 0:
peers.add(peer)
else:
- peers[idx].protocols.add(ProtocolState(protocol: protocol, connected: connected))
+ peers[idx].protocols.add(protocol)
diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim
index cb324075a..6e880f5a3 100644
--- a/waku/waku_api/rest/builder.nim
+++ b/waku/waku_api/rest/builder.nim
@@ -5,7 +5,6 @@ import presto
import
waku/waku_node,
waku/discovery/waku_discv5,
- waku/factory/external_config,
waku/waku_api/message_cache,
waku/waku_api/handlers,
waku/waku_api/rest/server,
@@ -18,7 +17,8 @@ import
waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api,
waku/waku_api/rest/health/handlers as rest_health_api,
waku/waku_api/rest/admin/handlers as rest_admin_api,
- waku/waku_core/topics
+ waku/waku_core/topics,
+ waku/waku_relay/protocol
## Monitoring and external interfaces
@@ -30,12 +30,18 @@ import
var restServerNotInstalledTab {.threadvar.}: TableRef[string, string]
restServerNotInstalledTab = newTable[string, string]()
-proc startRestServerEsentials*(
- nodeHealthMonitor: WakuNodeHealthMonitor, conf: WakuNodeConf
-): Result[WakuRestServerRef, string] =
- if not conf.rest:
- return ok(nil)
+export WakuRestServerRef
+type RestServerConf* = object
+ allowOrigin*: seq[string]
+ listenAddress*: IpAddress
+ port*: Port
+ admin*: bool
+ relayCacheCapacity*: uint32
+
+proc startRestServerEssentials*(
+ nodeHealthMonitor: WakuNodeHealthMonitor, conf: RestServerConf, portsShift: uint16
+): Result[WakuRestServerRef, string] =
let requestErrorHandler: RestRequestErrorHandler = proc(
error: RestRequestError, request: HttpRequestRef
): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
@@ -71,13 +77,13 @@ proc startRestServerEsentials*(
return defaultResponse()
let allowedOrigin =
- if len(conf.restAllowOrigin) > 0:
- some(conf.restAllowOrigin.join(","))
+ if len(conf.allowOrigin) > 0:
+ some(conf.allowOrigin.join(","))
else:
none(string)
- let address = conf.restAddress
- let port = Port(conf.restPort + conf.portsShift)
+ let address = conf.listenAddress
+ let port = Port(conf.port.uint16 + portsShift)
let server =
?newRestHttpServer(
address,
@@ -111,14 +117,16 @@ proc startRestServerProtocolSupport*(
restServer: WakuRestServerRef,
node: WakuNode,
wakuDiscv5: WakuDiscoveryV5,
- conf: WakuNodeConf,
+ conf: RestServerConf,
+ relayEnabled: bool,
+ lightPushEnabled: bool,
+ clusterId: uint16,
+ shards: seq[uint16],
+ contentTopics: seq[string],
): Result[void, string] =
- if not conf.rest:
- return ok()
-
var router = restServer.router
## Admin REST API
- if conf.restAdmin:
+ if conf.admin:
installAdminApiHandlers(router, node)
else:
restServerNotInstalledTab["admin"] =
@@ -128,19 +136,33 @@ proc startRestServerProtocolSupport*(
installDebugApiHandlers(router, node)
## Relay REST API
- if conf.relay:
- let cache = MessageCache.init(int(conf.restRelayCacheCapacity))
+ if relayEnabled:
+    ## This MessageCache is used, e.g., in js-waku<>nwaku interop tests.
+    ## js-waku tests ask nwaku-docker through REST whether a message is properly received.
+ const RestRelayCacheCapacity = 50
+ let cache = MessageCache.init(int(RestRelayCacheCapacity))
- let handler = messageCacheHandler(cache)
+ let handler: WakuRelayHandler = messageCacheHandler(cache)
- for shard in conf.shards:
- let pubsubTopic = $RelayShard(clusterId: conf.clusterId, shardId: shard)
+ for shard in shards:
+ let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard)
cache.pubsubSubscribe(pubsubTopic)
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler))
- for contentTopic in conf.contentTopics:
+      ## TODO: remove this line. Use the observer-observable pattern
+      ## within waku_node::registerRelayDefaultHandler instead.
+ discard node.wakuRelay.subscribe(pubsubTopic, handler)
+
+ for contentTopic in contentTopics:
cache.contentSubscribe(contentTopic)
- node.subscribe((kind: ContentSub, topic: contentTopic), some(handler))
+
+ let shard = node.wakuSharding.getShard(contentTopic).valueOr:
+ error "Autosharding error in REST", error = error
+ continue
+ let pubsubTopic = $shard
+
+      ## TODO: remove this line. Use the observer-observable pattern
+      ## within waku_node::registerRelayDefaultHandler instead.
+ discard node.wakuRelay.subscribe(pubsubTopic, handler)
installRelayApiHandlers(router, node, cache)
else:
@@ -178,7 +200,7 @@ proc startRestServerProtocolSupport*(
## or install it to be used with self-hosted lightpush service
## We either get lightpushnode (lightpush service node) from config or discovered or self served
if (node.wakuLegacyLightpushClient != nil) or
- (conf.lightpush and node.wakuLegacyLightPush != nil and node.wakuRelay != nil):
+ (lightPushEnabled and node.wakuLegacyLightPush != nil and node.wakuRelay != nil):
let lightDiscoHandler =
if not wakuDiscv5.isNil():
some(defaultDiscoveryHandler(wakuDiscv5, Lightpush))
diff --git a/waku/waku_api/rest/debug/client.nim b/waku/waku_api/rest/debug/client.nim
index 7048ae98f..c2d588197 100644
--- a/waku/waku_api/rest/debug/client.nim
+++ b/waku/waku_api/rest/debug/client.nim
@@ -2,7 +2,7 @@
import
chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
-import ../serdes, ../responses, ../rest_serdes, ./types
+import ../serdes, ../rest_serdes, ./types
export types
diff --git a/waku/waku_api/rest/filter/client.nim b/waku/waku_api/rest/filter/client.nim
index b674bc594..db1a6895e 100644
--- a/waku/waku_api/rest/filter/client.nim
+++ b/waku/waku_api/rest/filter/client.nim
@@ -2,20 +2,12 @@
import
json,
- std/sets,
stew/byteutils,
- strformat,
chronicles,
json_serialization,
json_serialization/std/options,
presto/[route, client, common]
-import
- ../../../common/base64,
- ../../../waku_core,
- ../serdes,
- ../responses,
- ../rest_serdes,
- ./types
+import ../../../common/base64, ../serdes, ../rest_serdes, ./types
export types
diff --git a/waku/waku_api/rest/health/client.nim b/waku/waku_api/rest/health/client.nim
index c6f339006..97f4a2c6d 100644
--- a/waku/waku_api/rest/health/client.nim
+++ b/waku/waku_api/rest/health/client.nim
@@ -1,8 +1,7 @@
{.push raises: [].}
-import
- chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
-import ./types, ../serdes, ../responses, ../rest_serdes, waku/node/health_monitor
+import chronicles, json_serialization, presto/[route, client]
+import ./types, ../serdes, ../rest_serdes, waku/node/health_monitor
logScope:
topics = "waku node rest health_api"
diff --git a/waku/waku_api/rest/legacy_lightpush/client.nim b/waku/waku_api/rest/legacy_lightpush/client.nim
index f0932e99f..a1e442857 100644
--- a/waku/waku_api/rest/legacy_lightpush/client.nim
+++ b/waku/waku_api/rest/legacy_lightpush/client.nim
@@ -1,15 +1,7 @@
{.push raises: [].}
-import
- json,
- std/sets,
- stew/byteutils,
- strformat,
- chronicles,
- json_serialization,
- json_serialization/std/options,
- presto/[route, client, common]
-import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types
+import chronicles, json_serialization, presto/[route, client, common]
+import ../serdes, ../rest_serdes, ./types
export types
diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim
index 601aab74c..cafcd89d2 100644
--- a/waku/waku_api/rest/lightpush/handlers.nim
+++ b/waku/waku_api/rest/lightpush/handlers.nim
@@ -26,18 +26,15 @@ logScope:
const FutTimeoutForPushRequestProcessing* = 5.seconds
-const NoPeerNoDiscoError =
- RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method")
-
-const NoPeerNoneFoundError =
- RestApiResponse.serviceUnavailable("No suitable service peer & none discovered")
+const NoPeerNoDiscoError = "No suitable service peer & no discovery method"
+const NoPeerNoneFoundError = "No suitable service peer & none discovered"
proc useSelfHostedLightPush(node: WakuNode): bool =
return node.wakuLightPush != nil and node.wakuLightPushClient == nil
proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode =
## Lightpush status codes are matching HTTP status codes by design
- return HttpCode(statusCode.int32)
+ return toHttpCode(statusCode.int).get(Http500)
proc makeRestResponse(response: WakuLightPushResult): RestApiResponse =
var httpStatus: HttpCode = Http200
@@ -72,10 +69,11 @@ proc installLightPushRequestHandler*(
debug "post", ROUTE_LIGHTPUSH, contentBody
let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr:
- return RestApiResponse.badRequest("Invalid push request: " & $error)
+ return
+ makeRestResponse(lightpushResultBadRequest("Invalid push request! " & $error))
let msg = req.message.toWakuMessage().valueOr:
- return RestApiResponse.badRequest("Invalid message: " & $error)
+ return makeRestResponse(lightpushResultBadRequest("Invalid message! " & $error))
var toPeer = none(RemotePeerInfo)
if useSelfHostedLightPush(node):
@@ -83,19 +81,23 @@ proc installLightPushRequestHandler*(
else:
let aPeer = node.peerManager.selectPeer(WakuLightPushCodec).valueOr:
let handler = discHandler.valueOr:
- return NoPeerNoDiscoError
+ return makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoDiscoError))
let peerOp = (await handler()).valueOr:
- return RestApiResponse.internalServerError("No value in peerOp: " & $error)
+ return makeRestResponse(
+ lightpushResultInternalError("No value in peerOp: " & $error)
+ )
peerOp.valueOr:
- return NoPeerNoneFoundError
+ return
+ makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoneFoundError))
toPeer = some(aPeer)
let subFut = node.lightpushPublish(req.pubsubTopic, msg, toPeer)
if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing):
error "Failed to request a message push due to timeout!"
- return RestApiResponse.serviceUnavailable("Push request timed out")
+ return
+ makeRestResponse(lightpushResultServiceUnavailable("Push request timed out"))
return makeRestResponse(subFut.value())
diff --git a/waku/waku_api/rest/relay/client.nim b/waku/waku_api/rest/relay/client.nim
index 5e72bb609..6956a956d 100644
--- a/waku/waku_api/rest/relay/client.nim
+++ b/waku/waku_api/rest/relay/client.nim
@@ -1,13 +1,7 @@
{.push raises: [].}
-import
- std/sets,
- stew/byteutils,
- chronicles,
- json_serialization,
- json_serialization/std/options,
- presto/[route, client, common]
-import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types
+import stew/byteutils, chronicles, json_serialization, presto/[route, client, common]
+import ../../../waku_core, ../serdes, ../rest_serdes, ./types
export types
diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim
index 7ee0ee7e3..252375208 100644
--- a/waku/waku_api/rest/relay/handlers.nim
+++ b/waku/waku_api/rest/relay/handlers.nim
@@ -66,9 +66,13 @@ proc installRelayApiHandlers*(
for pubsubTopic in newTopics:
cache.pubsubSubscribe(pubsubTopic)
+
node.subscribe(
(kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache))
- )
+ ).isOkOr:
+ let errorMsg = "Subscribe failed:" & $error
+ error "SUBSCRIBE failed", error = errorMsg
+ return RestApiResponse.internalServerError(errorMsg)
return RestApiResponse.ok()
@@ -88,7 +92,10 @@ proc installRelayApiHandlers*(
# Unsubscribe all handlers from requested topics
for pubsubTopic in req:
cache.pubsubUnsubscribe(pubsubTopic)
- node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic))
+ node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)).isOkOr:
+ let errorMsg = "Unsubscribe failed:" & $error
+ error "UNSUBSCRIBE failed", error = errorMsg
+ return RestApiResponse.internalServerError(errorMsg)
# Successfully unsubscribed from all requested topics
return RestApiResponse.ok()
@@ -193,9 +200,13 @@ proc installRelayApiHandlers*(
for contentTopic in newTopics:
cache.contentSubscribe(contentTopic)
+
node.subscribe(
(kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache))
- )
+ ).isOkOr:
+ let errorMsg = "Subscribe failed:" & $error
+ error "SUBSCRIBE failed", error = errorMsg
+ return RestApiResponse.internalServerError(errorMsg)
return RestApiResponse.ok()
@@ -211,7 +222,10 @@ proc installRelayApiHandlers*(
for contentTopic in req:
cache.contentUnsubscribe(contentTopic)
- node.unsubscribe((kind: ContentUnsub, topic: contentTopic))
+ node.unsubscribe((kind: ContentUnsub, topic: contentTopic)).isOkOr:
+ let errorMsg = "Unsubscribe failed:" & $error
+ error "UNSUBSCRIBE failed", error = errorMsg
+ return RestApiResponse.internalServerError(errorMsg)
return RestApiResponse.ok()
diff --git a/waku/waku_api/rest/serdes.nim b/waku/waku_api/rest/serdes.nim
index eb6bc1545..d54d17e78 100644
--- a/waku/waku_api/rest/serdes.nim
+++ b/waku/waku_api/rest/serdes.nim
@@ -1,9 +1,9 @@
{.push raises: [].}
import
- std/typetraits,
+ std/[typetraits, parseutils],
results,
- stew/byteutils,
+ stew/[byteutils, base10],
chronicles,
serialization,
json_serialization,
@@ -100,3 +100,13 @@ proc encodeString*(value: string): RestResult[string] =
proc decodeString*(t: typedesc[string], value: string): RestResult[string] =
ok(value)
+
+proc encodeString*(value: SomeUnsignedInt): RestResult[string] =
+ ok(Base10.toString(value))
+
+proc decodeString*(T: typedesc[SomeUnsignedInt], value: string): RestResult[T] =
+ let v = Base10.decode(T, value)
+ if v.isErr():
+ return err(v.error())
+ else:
+ return ok(v.get())
diff --git a/waku/waku_api/rest/server.nim b/waku/waku_api/rest/server.nim
index b8ad405c3..f16dfe83f 100644
--- a/waku/waku_api/rest/server.nim
+++ b/waku/waku_api/rest/server.nim
@@ -23,7 +23,7 @@ type
### Configuration
-type RestServerConf* = object
+type RestServerConf* {.requiresInit.} = object
cacheSize*: Natural
## \
## The maximum number of recently accessed states that are kept in \
diff --git a/waku/waku_archive/driver/queue_driver/index.nim b/waku/waku_archive/driver/queue_driver/index.nim
index 113d426d4..17783ebcc 100644
--- a/waku/waku_archive/driver/queue_driver/index.nim
+++ b/waku/waku_archive/driver/queue_driver/index.nim
@@ -1,6 +1,5 @@
{.push raises: [].}
-import stew/byteutils
import ../../../waku_core
type Index* = object
diff --git a/waku/waku_core/peers.nim b/waku/waku_core/peers.nim
index fdd3d7948..883f266bd 100644
--- a/waku/waku_core/peers.nim
+++ b/waku/waku_core/peers.nim
@@ -18,7 +18,7 @@ import
libp2p/routing_record,
regex,
json_serialization
-import ../waku_enr/capabilities
+import ../waku_enr
type
Connectedness* = enum
@@ -231,7 +231,7 @@ proc parsePeerInfo*(maddrs: varargs[string]): Result[RemotePeerInfo, string] =
parsePeerInfo(multiAddresses)
-func getTransportProtocol(typedR: TypedRecord): Option[IpTransportProtocol] =
+func getTransportProtocol(typedR: enr.TypedRecord): Option[IpTransportProtocol] =
if typedR.tcp6.isSome() or typedR.tcp.isSome():
return some(IpTransportProtocol.tcpProtocol)
@@ -255,9 +255,9 @@ proc parseUrlPeerAddr*(
return ok(some(parsedPeerInfo.value))
-proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] =
+proc toRemotePeerInfo*(enrRec: enr.Record): Result[RemotePeerInfo, cstring] =
## Converts an ENR to dialable RemotePeerInfo
- let typedR = TypedRecord.fromRecord(enr)
+ let typedR = enr.TypedRecord.fromRecord(enrRec)
if not typedR.secp256k1.isSome():
return err("enr: no secp256k1 key in record")
@@ -303,7 +303,7 @@ proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] =
return err("enr: no addresses in record")
let protocolsRes = catch:
- enr.getCapabilitiesCodecs()
+ enrRec.getCapabilitiesCodecs()
var protocols: seq[string]
if not protocolsRes.isErr():
@@ -312,7 +312,7 @@ proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] =
error "Could not retrieve supported protocols from enr",
peerId = peerId, msg = protocolsRes.error.msg
- return ok(RemotePeerInfo.init(peerId, addrs, some(enr), protocols))
+ return ok(RemotePeerInfo.init(peerId, addrs, some(enrRec), protocols))
converter toRemotePeerInfo*(peerRecord: PeerRecord): RemotePeerInfo =
## Converts peer records to dialable RemotePeerInfo
@@ -350,8 +350,8 @@ func hasUdpPort*(peer: RemotePeerInfo): bool =
return false
let
- enr = peer.enr.get()
- typedEnr = TypedRecord.fromRecord(enr)
+ enrRec = peer.enr.get()
+ typedEnr = enr.TypedRecord.fromRecord(enrRec)
typedEnr.udp.isSome() or typedEnr.udp6.isSome()
@@ -361,3 +361,18 @@ proc getAgent*(peer: RemotePeerInfo): string =
return "unknown"
return peer.agent
+
+proc getShards*(peer: RemotePeerInfo): seq[uint16] =
+ if peer.enr.isNone():
+ return @[]
+
+ let enrRec = peer.enr.get()
+ let typedRecord = enrRec.toTyped().valueOr:
+ trace "invalid ENR record", error = error
+ return @[]
+
+ let shards = typedRecord.relaySharding()
+ if shards.isSome():
+ return shards.get().shardIds
+
+ return @[]
diff --git a/waku/waku_enr/sharding.nim b/waku/waku_enr/sharding.nim
index 88dc4e200..4ee77bf96 100644
--- a/waku/waku_enr/sharding.nim
+++ b/waku/waku_enr/sharding.nim
@@ -8,7 +8,7 @@ import
eth/keys,
libp2p/[multiaddress, multicodec],
libp2p/crypto/crypto
-import ../common/enr, ../waku_core
+import ../common/enr, ../waku_core/topics/pubsub_topic
logScope:
topics = "waku enr sharding"
diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim
index d8b79ab67..c3a4683f7 100644
--- a/waku/waku_filter_v2/protocol.nim
+++ b/waku/waku_filter_v2/protocol.nim
@@ -225,7 +225,7 @@ proc maintainSubscriptions*(wf: WakuFilter) {.async.} =
## Remove subscriptions for peers that have been removed from peer store
var peersToRemove: seq[PeerId]
for peerId in wf.subscriptions.peersSubscribed.keys:
- if not wf.peerManager.wakuPeerStore.hasPeer(peerId, WakuFilterPushCodec):
+ if not wf.peerManager.switch.peerStore.hasPeer(peerId, WakuFilterPushCodec):
debug "peer has been removed from peer store, we will remove subscription",
peerId = peerId
peersToRemove.add(peerId)
diff --git a/waku/waku_filter_v2/protocol_metrics.nim b/waku/waku_filter_v2/protocol_metrics.nim
index b19f612f3..2d9f63c63 100644
--- a/waku/waku_filter_v2/protocol_metrics.nim
+++ b/waku/waku_filter_v2/protocol_metrics.nim
@@ -11,7 +11,11 @@ declarePublicGauge waku_filter_subscriptions, "number of subscribed filter clien
declarePublicHistogram waku_filter_request_duration_seconds,
"duration of filter subscribe requests", ["type"]
declarePublicHistogram waku_filter_handle_message_duration_seconds,
- "duration to push message to filter subscribers"
+ "duration to push message to filter subscribers",
+ buckets = [
+ 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0,
+ 15.0, 20.0, 30.0, Inf,
+ ]
# Error types (metric label values)
const
diff --git a/waku/waku_filter_v2/subscriptions.nim b/waku/waku_filter_v2/subscriptions.nim
index 6b22a94b9..8d3b8084f 100644
--- a/waku/waku_filter_v2/subscriptions.nim
+++ b/waku/waku_filter_v2/subscriptions.nim
@@ -7,12 +7,7 @@ import
libp2p/peerid,
libp2p/stream/connection,
stew/shims/sets
-import
- ../waku_core,
- ../utils/tableutils,
- ../common/rate_limit/setting,
- ../node/peer_manager,
- ./common
+import ../waku_core, ../utils/tableutils, ../node/peer_manager
logScope:
topics = "waku filter subscriptions"
diff --git a/waku/waku_keystore/keystore.nim b/waku/waku_keystore/keystore.nim
index 9741761ff..6cc4ef701 100644
--- a/waku/waku_keystore/keystore.nim
+++ b/waku/waku_keystore/keystore.nim
@@ -61,7 +61,9 @@ proc loadAppKeystore*(
return err(
AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for reading")
)
- let fileContents = readAll(f)
+
+ ## the next blocks expect the whole keystore.json content to be compacted in one single line
+ let fileContents = readAll(f).replace(" ", "").replace("\n", "")
# We iterate over each substring split by separator (which we expect to correspond to a single keystore json)
for keystore in fileContents.split(separator):
@@ -159,8 +161,7 @@ proc loadAppKeystore*(
return err(
AppKeystoreError(
- kind: KeystoreKeystoreDoesNotExist,
- msg: "No keystore found for the passed parameters",
+ kind: KeystoreKeystoreDoesNotExist, msg: "The keystore file could not be parsed"
)
)
diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim
index d6700412f..3cfc3fe90 100644
--- a/waku/waku_lightpush/callbacks.nim
+++ b/waku/waku_lightpush/callbacks.nim
@@ -1,12 +1,11 @@
{.push raises: [].}
-import stew/results
+import results
import
../waku_core,
../waku_relay,
./common,
- ./protocol_metrics,
../waku_rln_relay,
../waku_rln_relay/protocol_types
diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim
index 5802881e5..fa9ea466e 100644
--- a/waku/waku_lightpush/client.nim
+++ b/waku/waku_lightpush/client.nim
@@ -77,9 +77,13 @@ proc sendPushRequest(
proc publish*(
wl: WakuLightPushClient,
pubSubTopic: Option[PubsubTopic] = none(PubsubTopic),
- message: WakuMessage,
+ wakuMessage: WakuMessage,
peer: PeerId | RemotePeerInfo,
): Future[WakuLightPushResult] {.async, gcsafe.} =
+ var message = wakuMessage
+ if message.timestamp == 0:
+ message.timestamp = getNowInNanosecondTime()
+
when peer is PeerId:
info "publish",
peerId = shortLog(peer),
@@ -104,11 +108,15 @@ proc publish*(
return lightpushSuccessResult(publishedCount)
proc publishToAny*(
- wl: WakuLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage
+ wl: WakuLightPushClient, pubSubTopic: PubsubTopic, wakuMessage: WakuMessage
): Future[WakuLightPushResult] {.async, gcsafe.} =
## This proc is similar to the publish one but in this case
## we don't specify a particular peer and instead we get it from peer manager
+ var message = wakuMessage
+ if message.timestamp == 0:
+ message.timestamp = getNowInNanosecondTime()
+
info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex
let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr:
# TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side?
diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim
index 1e71ab2e7..20c177e39 100644
--- a/waku/waku_lightpush/common.nim
+++ b/waku/waku_lightpush/common.nim
@@ -43,6 +43,12 @@ func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult =
func lightpushResultInternalError*(msg: string): WakuLightPushResult =
return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg)))
+func lightpushResultBadRequest*(msg: string): WakuLightPushResult =
+ return err((LightpushStatusCode.BAD_REQUEST, some(msg)))
+
+func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult =
+ return err((LightpushStatusCode.SERVICE_NOT_AVAILABLE, some(msg)))
+
func lighpushErrorResult*(
statusCode: LightpushStatusCode, desc: Option[string]
): WakuLightPushResult =
@@ -79,5 +85,3 @@ func mapPubishingErrorToPushResult*(
some("Error generating message id, skipping publish"),
)
)
- else:
- return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, none[string]()))
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index fbaef3f4e..4ed5a9413 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -78,12 +78,13 @@ proc handleRequest*(
waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])
+ let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex()
notice "handling lightpush request",
my_peer_id = wl.peerManager.switch.peerInfo.peerId,
peer_id = peerId,
requestId = pushRequest.requestId,
pubsubTopic = pushRequest.pubsubTopic,
- msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex(),
+ msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()
let handleRes = await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)
diff --git a/waku/waku_lightpush/self_req_handler.nim b/waku/waku_lightpush/self_req_handler.nim
index fffced40a..06a0d3715 100644
--- a/waku/waku_lightpush/self_req_handler.nim
+++ b/waku/waku_lightpush/self_req_handler.nim
@@ -9,15 +9,8 @@
## which spawn a full service Waku node
## that could be used also as a lightpush client, helping testing and development.
-import results, chronos, chronicles, std/options, metrics, stew/byteutils
-import
- ../waku_core,
- ./protocol,
- ./common,
- ./rpc,
- ./rpc_codec,
- ./protocol_metrics,
- ../utils/requests
+import results, chronos, std/options, metrics
+import ../waku_core, ./protocol, ./common, ./rpc, ./rpc_codec, ../utils/requests
proc handleSelfLightPushRequest*(
self: WakuLightPush, pubSubTopic: Option[PubsubTopic], message: WakuMessage
diff --git a/waku/waku_lightpush_legacy/client.nim b/waku/waku_lightpush_legacy/client.nim
index c3b4a158e..503cbe1eb 100644
--- a/waku/waku_lightpush_legacy/client.nim
+++ b/waku/waku_lightpush_legacy/client.nim
@@ -72,10 +72,15 @@ proc sendPushRequest(
proc publish*(
wl: WakuLegacyLightPushClient,
pubSubTopic: PubsubTopic,
- message: WakuMessage,
+ wakuMessage: WakuMessage,
peer: RemotePeerInfo,
): Future[WakuLightPushResult[string]] {.async, gcsafe.} =
## On success, returns the msg_hash of the published message
+
+ var message = wakuMessage
+ if message.timestamp == 0:
+ message.timestamp = getNowInNanosecondTime()
+
let msg_hash_hex_str = computeMessageHash(pubsubTopic, message).to0xHex()
let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message)
?await wl.sendPushRequest(pushRequest, peer)
diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim
index feb6a1320..5de25ead9 100644
--- a/waku/waku_lightpush_legacy/protocol.nim
+++ b/waku/waku_lightpush_legacy/protocol.nim
@@ -42,12 +42,14 @@ proc handleRequest*(
pubSubTopic = request.get().pubSubTopic
message = request.get().message
+ let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex()
waku_lightpush_messages.inc(labelValues = ["PushRequest"])
+
notice "handling lightpush request",
peer_id = peerId,
requestId = requestId,
pubsubTopic = pubsubTopic,
- msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(),
+ msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()
let handleRes = await wl.pushHandler(peerId, pubsubTopic, message)
diff --git a/waku/waku_node.nim b/waku/waku_node.nim
index f1c647111..74415e9de 100644
--- a/waku/waku_node.nim
+++ b/waku/waku_node.nim
@@ -1,7 +1,7 @@
import
- ./node/config,
+ ./node/net_config,
./node/waku_switch as switch,
./node/waku_node as node,
./node/health_monitor as health_monitor
-export config, switch, node, health_monitor
+export net_config, switch, node, health_monitor
diff --git a/waku/waku_noise/noise_types.nim b/waku/waku_noise/noise_types.nim
index 3d288a242..3b88c43e8 100644
--- a/waku/waku_noise/noise_types.nim
+++ b/waku/waku_noise/noise_types.nim
@@ -11,7 +11,7 @@ import std/[options, tables]
import chronos
import chronicles
import bearssl
-import nimcrypto/[sha2, hmac]
+import nimcrypto/sha2
import libp2p/errors
import libp2p/crypto/[crypto, chacha20poly1305, curve25519]
diff --git a/waku/waku_noise/noise_utils.nim b/waku/waku_noise/noise_utils.nim
index a612c0728..babab1ca4 100644
--- a/waku/waku_noise/noise_utils.nim
+++ b/waku/waku_noise/noise_utils.nim
@@ -9,8 +9,9 @@ import std/[algorithm, base64, oids, options, strutils, tables, sequtils]
import chronos
import chronicles
import bearssl/rand
-import stew/[results, endians2, byteutils]
-import nimcrypto/[sha2, hmac]
+import results
+import stew/[endians2, byteutils]
+import nimcrypto/sha2
import libp2p/crypto/[chacha20poly1305, curve25519, hkdf]
diff --git a/waku/waku_peer_exchange/protocol.nim b/waku/waku_peer_exchange/protocol.nim
index 7c9005215..2732cb1c1 100644
--- a/waku/waku_peer_exchange/protocol.nim
+++ b/waku/waku_peer_exchange/protocol.nim
@@ -218,7 +218,7 @@ proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool =
proc populateEnrCache(wpx: WakuPeerExchange) =
# share only peers that i) are reachable ii) come from discv5 iii) share cluster
- let withEnr = wpx.peerManager.wakuPeerStore.getReachablePeers().filterIt(
+ let withEnr = wpx.peerManager.switch.peerStore.getReachablePeers().filterIt(
poolFilter(wpx.cluster, it)
)
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index 0222db0d1..daaf056b7 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -129,7 +129,8 @@ type
# the second entry contains the error messages to be returned when the validator fails
wakuValidators: seq[tuple[handler: WakuValidatorHandler, errorMessage: string]]
# a map of validators to error messages to return when validation fails
- validatorInserted: Table[PubsubTopic, bool]
+ topicValidator: Table[PubsubTopic, ValidatorHandler]
+ # map topic with its assigned validator within pubsub
publishObservers: seq[PublishObserver]
topicsHealth*: Table[string, TopicHealth]
onTopicHealthChange*: TopicHealthChangeHandler
@@ -323,21 +324,46 @@ proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} =
proc getDHigh*(T: type WakuRelay): int =
return GossipsubParameters.dHigh
-proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] =
- ## Returns the number of peers in a mesh defined by the passed pubsub topic.
+proc getPubSubPeersInMesh*(
+ w: WakuRelay, pubsubTopic: PubsubTopic
+): Result[HashSet[PubSubPeer], string] =
+ ## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic.
## The 'mesh' atribute is defined in the GossipSub ref object.
if not w.mesh.hasKey(pubsubTopic):
- debug "getNumPeersInMesh - there is no mesh peer for the given pubsub topic",
+ debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
pubsubTopic = pubsubTopic
- return ok(0)
+ return ok(initHashSet[PubSubPeer]())
let peersRes = catch:
w.mesh[pubsubTopic]
let peers: HashSet[PubSubPeer] = peersRes.valueOr:
- return
- err("getNumPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg)
+ return err(
+ "getPubSubPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg
+ )
+
+ return ok(peers)
+
+proc getPeersInMesh*(
+ w: WakuRelay, pubsubTopic: PubsubTopic
+): Result[seq[PeerId], string] =
+ ## Returns the list of peerIds in a mesh defined by the passed pubsub topic.
+ ## The 'mesh' atribute is defined in the GossipSub ref object.
+ let pubSubPeers = w.getPubSubPeersInMesh(pubsubTopic).valueOr:
+ return err(error)
+ let peerIds = toSeq(pubSubPeers).mapIt(it.peerId)
+
+ return ok(peerIds)
+
+proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] =
+ ## Returns the number of peers in a mesh defined by the passed pubsub topic.
+
+ let peers = w.getPubSubPeersInMesh(pubsubTopic).valueOr:
+ return err(
+ "getNumPeersInMesh - failed retrieving peers in mesh: " & pubsubTopic & ": " &
+ error
+ )
return ok(peers.len)
@@ -402,7 +428,7 @@ proc isSubscribed*(w: WakuRelay, topic: PubsubTopic): bool =
proc subscribedTopics*(w: WakuRelay): seq[PubsubTopic] =
return toSeq(GossipSub(w).topics.keys())
-proc generateOrderedValidator(w: WakuRelay): auto {.gcsafe.} =
+proc generateOrderedValidator(w: WakuRelay): ValidatorHandler {.gcsafe.} =
# rejects messages that are not WakuMessage
let wrappedValidator = proc(
pubsubTopic: string, message: messages.Message
@@ -491,9 +517,10 @@ proc subscribe*(
# Add the ordered validator to the topic
# This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator.
# Otherwise this might lead to unintended behaviour.
- if not w.validatorInserted.hasKey(pubSubTopic):
+ if not w.topicValidator.hasKey(pubSubTopic):
+ let newValidator = w.generateOrderedValidator()
procCall GossipSub(w).addValidator(pubSubTopic, w.generateOrderedValidator())
- w.validatorInserted[pubSubTopic] = true
+ w.topicValidator[pubSubTopic] = newValidator
# set this topic parameters for scoring
w.topicParams[pubsubTopic] = TopicParameters
@@ -509,21 +536,47 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
debug "unsubscribe all", pubsubTopic = pubsubTopic
procCall GossipSub(w).unsubscribeAll(pubsubTopic)
- w.validatorInserted.del(pubsubTopic)
+ w.topicValidator.del(pubsubTopic)
-proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: TopicHandler) =
- ## Unsubscribe this handler on this pubsub topic
+proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) =
+ if not w.topicValidator.hasKey(pubsubTopic):
+ error "unsubscribe no validator for this topic", pubsubTopic
+ return
- debug "unsubscribe", pubsubTopic = pubsubTopic
+ if pubsubtopic notin Pubsub(w).topics:
+ error "not subscribed to the given topic", pubsubTopic
+ return
- procCall GossipSub(w).unsubscribe(pubsubTopic, handler)
+ var topicHandlerSeq: seq[TopicHandler]
+ var topicValidator: ValidatorHandler
+ try:
+ topicHandlerSeq = Pubsub(w).topics[pubsubTopic]
+ if topicHandlerSeq.len == 0:
+ error "unsubscribe no handler for this topic", pubsubTopic
+ return
+ topicValidator = w.topicValidator[pubsubTopic]
+ except KeyError:
+ error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg()
+ return
+
+ let topicHandler = topicHandlerSeq[0]
+
+ debug "unsubscribe", pubsubTopic
+ procCall GossipSub(w).unsubscribe($pubsubTopic, topicHandler)
+ ## TODO: uncomment the following line when https://github.com/vacp2p/nim-libp2p/pull/1356
+ ## is available in a nim-libp2p release.
+ # procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator)
proc publish*(
- w: WakuRelay, pubsubTopic: PubsubTopic, message: WakuMessage
+ w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage
): Future[Result[int, PublishOutcome]] {.async.} =
if pubsubTopic.isEmptyOrWhitespace():
return err(NoTopicSpecified)
+ var message = wakuMessage
+ if message.timestamp == 0:
+ message.timestamp = getNowInNanosecondTime()
+
let data = message.encode().buffer
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex()
@@ -539,22 +592,22 @@ proc publish*(
return ok(relayedPeerCount)
-proc getNumConnectedPeers*(
+proc getConnectedPubSubPeers*(
w: WakuRelay, pubsubTopic: PubsubTopic
-): Result[int, string] =
- ## Returns the number of connected peers and subscribed to the passed pubsub topic.
+): Result[HashSet[PubsubPeer], string] =
+ ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic.
## The 'gossipsub' atribute is defined in the GossipSub ref object.
if pubsubTopic == "":
## Return all the connected peers
- var numConnPeers = 0
+ var peerIds = initHashSet[PubsubPeer]()
for k, v in w.gossipsub:
- numConnPeers.inc(v.len)
- return ok(numConnPeers)
+ peerIds = peerIds + v
+ return ok(peerIds)
if not w.gossipsub.hasKey(pubsubTopic):
return err(
- "getNumConnectedPeers - there is no gossipsub peer for the given pubsub topic: " &
+ "getConnectedPeers - there is no gossipsub peer for the given pubsub topic: " &
pubsubTopic
)
@@ -562,15 +615,37 @@ proc getNumConnectedPeers*(
w.gossipsub[pubsubTopic]
let peers: HashSet[PubSubPeer] = peersRes.valueOr:
+ return
+ err("getConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg)
+
+ return ok(peers)
+
+proc getConnectedPeers*(
+ w: WakuRelay, pubsubTopic: PubsubTopic
+): Result[seq[PeerId], string] =
+ ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic.
+ ## The 'gossipsub' atribute is defined in the GossipSub ref object.
+
+ let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr:
+ return err(error)
+
+ let peerIds = toSeq(peers).mapIt(it.peerId)
+ return ok(peerIds)
+
+proc getNumConnectedPeers*(
+ w: WakuRelay, pubsubTopic: PubsubTopic
+): Result[int, string] =
+ ## Returns the number of connected peers and subscribed to the passed pubsub topic.
+
+ ## Return all the connected peers
+ let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr:
return err(
- "getNumConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg
+ "getNumConnectedPeers - failed retrieving peers in mesh: " & pubsubTopic & ": " &
+ error
)
return ok(peers.len)
proc getSubscribedTopics*(w: WakuRelay): seq[PubsubTopic] =
## Returns a seq containing the current list of subscribed topics
- var topics: seq[PubsubTopic]
- for t in w.validatorInserted.keys():
- topics.add(t)
- return topics
+ return PubSub(w).topics.keys.toSeq().mapIt(cast[PubsubTopic](it))
diff --git a/waku/waku_rln_relay/conversion_utils.nim b/waku/waku_rln_relay/conversion_utils.nim
index e710fea62..4a168ebeb 100644
--- a/waku/waku_rln_relay/conversion_utils.nim
+++ b/waku/waku_rln_relay/conversion_utils.nim
@@ -27,9 +27,6 @@ proc inHex*(
valueHex = "0" & valueHex
return toLowerAscii(valueHex)
-proc toUserMessageLimit*(v: UInt256): UserMessageLimit =
- return cast[UserMessageLimit](v)
-
proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] =
## returns length prefixed version of the input
## with the following format [len<8>|input]
@@ -78,6 +75,31 @@ proc serialize*(
)
return output
+proc serialize*(witness: RLNWitnessInput): seq[byte] =
+ ## Serializes the RLN witness into a byte array following zerokit's expected format.
+ ## The serialized format includes:
+ ## - identity_secret (32 bytes, little-endian with zero padding)
+ ## - user_message_limit (32 bytes, little-endian with zero padding)
+ ## - message_id (32 bytes, little-endian with zero padding)
+ ## - merkle tree depth (8 bytes, little-endian) = path_elements.len / 32
+ ## - path_elements (each 32 bytes, ordered bottom-to-top)
+ ## - merkle tree depth again (8 bytes, little-endian)
+ ## - identity_path_index (sequence of bits as bytes, 0 = left, 1 = right)
+ ## - x (32 bytes, little-endian with zero padding)
+ ## - external_nullifier (32 bytes, little-endian with zero padding)
+ var buffer: seq[byte]
+ buffer.add(@(witness.identity_secret))
+ buffer.add(@(witness.user_message_limit))
+ buffer.add(@(witness.message_id))
+ buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian))
+ for element in witness.path_elements:
+ buffer.add(element)
+ buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian))
+ buffer.add(witness.identity_path_index)
+ buffer.add(@(witness.x))
+ buffer.add(@(witness.external_nullifier))
+ return buffer
+
proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] =
## a private proc to convert RateLimitProof and data to a byte seq
## this conversion is used in the proof verification proc
@@ -133,3 +155,25 @@ func `+`*(a, b: Quantity): Quantity {.borrow.}
func u256*(n: Quantity): UInt256 {.inline.} =
n.uint64.stuint(256)
+
+proc uint64ToField*(n: uint64): array[32, byte] =
+ var output: array[32, byte]
+ let bytes = toBytes(n, Endianness.littleEndian)
+ output[0 ..< bytes.len] = bytes
+ return output
+
+proc UInt256ToField*(v: UInt256): array[32, byte] =
+ return cast[array[32, byte]](v) # already doesn't use `result`
+
+proc seqToField*(s: seq[byte]): array[32, byte] =
+ var output: array[32, byte]
+ let len = min(s.len, 32)
+ for i in 0 ..< len:
+ output[i] = s[i]
+ return output
+
+proc uint64ToIndex*(index: MembershipIndex, depth: int): seq[byte] =
+ var output = newSeq[byte](depth)
+ for i in 0 ..< depth:
+ output[i] = byte((index shr i) and 1) # LSB-first bit decomposition
+ return output
diff --git a/waku/waku_rln_relay/group_manager/group_manager_base.nim b/waku/waku_rln_relay/group_manager/group_manager_base.nim
index 818b36140..4a1c84e55 100644
--- a/waku/waku_rln_relay/group_manager/group_manager_base.nim
+++ b/waku/waku_rln_relay/group_manager/group_manager_base.nim
@@ -145,7 +145,6 @@ method validateRoot*(
g: GroupManager, root: MerkleNode
): bool {.base, gcsafe, raises: [].} =
## validates the root against the valid roots queue
- # Check if the root is in the valid roots queue
if g.indexOfRoot(root) >= 0:
return true
return false
@@ -175,7 +174,7 @@ method verifyProof*(
method generateProof*(
g: GroupManager,
- data: openArray[byte],
+ data: seq[byte],
epoch: Epoch,
messageId: MessageId,
rlnIdentifier = DefaultRlnIdentifier,
@@ -189,6 +188,7 @@ method generateProof*(
return err("membership index is not set")
if g.userMessageLimit.isNone():
return err("user message limit is not set")
+
waku_rln_proof_generation_duration_seconds.nanosecondTime:
let proof = proofGen(
rlnInstance = g.rlnInstance,
@@ -201,8 +201,6 @@ method generateProof*(
).valueOr:
return err("proof generation failed: " & $error)
- waku_rln_remaining_proofs_per_epoch.dec()
- waku_rln_total_generated_proofs.inc()
return ok(proof)
method isReady*(g: GroupManager): Future[bool] {.base, async.} =
diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
index e61ffb956..600291ecf 100644
--- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -10,19 +10,18 @@ import
nimcrypto/keccak as keccak,
stint,
json,
- std/tables,
+ std/[strutils, tables, algorithm],
stew/[byteutils, arrayops],
- sequtils,
- strutils
+ sequtils
+
import
../../../waku_keystore,
../../rln,
+ ../../rln/rln_interface,
../../conversion_utils,
../group_manager_base,
./retry_wrapper
-from strutils import parseHexInt
-
export group_manager_base
logScope:
@@ -31,63 +30,39 @@ logScope:
# using the when predicate does not work within the contract macro, hence need to dupe
contract(WakuRlnContract):
# this serves as an entrypoint into the rln membership set
- proc register(idCommitment: UInt256, userMessageLimit: EthereumUInt32)
+ proc register(idCommitment: UInt256, userMessageLimit: UInt32)
# Initializes the implementation contract (only used in unit tests)
proc initialize(maxMessageLimit: UInt256)
# this event is raised when a new member is registered
- proc MemberRegistered(rateCommitment: UInt256, index: EthereumUInt32) {.event.}
+ proc MemberRegistered(rateCommitment: UInt256, index: UInt32) {.event.}
# this function denotes existence of a given user
- proc memberExists(idCommitment: Uint256): UInt256 {.view.}
+ proc memberExists(idCommitment: UInt256): UInt256 {.view.}
# this constant describes the next index of a new member
proc commitmentIndex(): UInt256 {.view.}
# this constant describes the block number this contract was deployed on
proc deployedBlockNumber(): UInt256 {.view.}
# this constant describes max message limit of rln contract
proc MAX_MESSAGE_LIMIT(): UInt256 {.view.}
+ # this function returns the merkleProof for a given index
+ # proc merkleProofElements(index: UInt40): seq[byte] {.view.}
+ # this function returns the merkle root
+ proc root(): UInt256 {.view.}
type
WakuRlnContractWithSender = Sender[WakuRlnContract]
OnchainGroupManager* = ref object of GroupManager
- ethClientUrl*: string
+ ethClientUrls*: seq[string]
ethPrivateKey*: Option[string]
ethContractAddress*: string
ethRpc*: Option[Web3]
- rlnContractDeployedBlockNumber*: BlockNumber
wakuRlnContract*: Option[WakuRlnContractWithSender]
- latestProcessedBlock*: BlockNumber
registrationTxHash*: Option[TxHash]
chainId*: uint
keystorePath*: Option[string]
keystorePassword*: Option[string]
registrationHandler*: Option[RegistrationHandler]
- # this buffer exists to backfill appropriate roots for the merkle tree,
- # in event of a reorg. we store 5 in the buffer. Maybe need to revisit this,
- # because the average reorg depth is 1 to 2 blocks.
- validRootBuffer*: Deque[MerkleNode]
- # interval loop to shut down gracefully
- blockFetchingActive*: bool
-
-const DefaultKeyStorePath* = "rlnKeystore.json"
-const DefaultKeyStorePassword* = "password"
-
-const DefaultBlockPollRate* = 6.seconds
-
-template initializedGuard(g: OnchainGroupManager): untyped =
- if not g.initialized:
- raise newException(CatchableError, "OnchainGroupManager is not initialized")
-
-proc resultifiedInitGuard(g: OnchainGroupManager): GroupManagerResult[void] =
- try:
- initializedGuard(g)
- return ok()
- except CatchableError:
- return err("OnchainGroupManager is not initialized")
-
-template retryWrapper(
- g: OnchainGroupManager, res: auto, errStr: string, body: untyped
-): auto =
- retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
- body
+ latestProcessedBlock*: BlockNumber
+ merkleProofCache*: seq[byte]
proc setMetadata*(
g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber)
@@ -112,33 +87,109 @@ proc setMetadata*(
return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
return ok()
-method atomicBatch*(
- g: OnchainGroupManager,
- start: MembershipIndex,
- rateCommitments = newSeq[RawRateCommitment](),
- toRemoveIndices = newSeq[MembershipIndex](),
-): Future[void] {.async: (raises: [Exception]), base.} =
- initializedGuard(g)
+proc fetchMerkleProofElements*(
+ g: OnchainGroupManager
+): Future[Result[seq[byte], string]] {.async.} =
+ try:
+ let membershipIndex = g.membershipIndex.get()
+ let index40 = stuint(membershipIndex, 40)
- waku_rln_membership_insertion_duration_seconds.nanosecondTime:
- let operationSuccess =
- g.rlnInstance.atomicWrite(some(start), rateCommitments, toRemoveIndices)
- if not operationSuccess:
- raise newException(CatchableError, "atomic batch operation failed")
- # TODO: when slashing is enabled, we need to track slashed members
- waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
+ let methodSig = "merkleProofElements(uint40)"
+ let methodIdDigest = keccak.keccak256.digest(methodSig)
+ let methodId = methodIdDigest.data[0 .. 3]
- if g.registerCb.isSome():
- var membersSeq = newSeq[Membership]()
- for i in 0 ..< rateCommitments.len:
- var index = start + MembershipIndex(i)
- debug "registering member to callback",
- rateCommitment = rateCommitments[i], index = index
- let member = Membership(rateCommitment: rateCommitments[i], index: index)
- membersSeq.add(member)
- await g.registerCb.get()(membersSeq)
+ var paddedParam = newSeq[byte](32)
+ let indexBytes = index40.toBytesBE()
+ for i in 0 ..< min(indexBytes.len, paddedParam.len):
+ paddedParam[paddedParam.len - indexBytes.len + i] = indexBytes[i]
- g.validRootBuffer = g.slideRootQueue()
+ var callData = newSeq[byte]()
+ for b in methodId:
+ callData.add(b)
+ callData.add(paddedParam)
+
+ var tx: TransactionArgs
+ tx.to = Opt.some(fromHex(Address, g.ethContractAddress))
+ tx.data = Opt.some(callData)
+
+ let responseBytes = await g.ethRpc.get().provider.eth_call(tx, "latest")
+
+ return ok(responseBytes)
+ except CatchableError:
+ error "Failed to fetch Merkle proof elements", error = getCurrentExceptionMsg()
+ return err("Failed to fetch merkle proof elements: " & getCurrentExceptionMsg())
+
+proc fetchMerkleRoot*(
+ g: OnchainGroupManager
+): Future[Result[UInt256, string]] {.async.} =
+ try:
+ let merkleRootInvocation = g.wakuRlnContract.get().root()
+ let merkleRoot = await merkleRootInvocation.call()
+ return ok(merkleRoot)
+ except CatchableError:
+ error "Failed to fetch Merkle root", error = getCurrentExceptionMsg()
+ return err("Failed to fetch merkle root: " & getCurrentExceptionMsg())
+
+template initializedGuard(g: OnchainGroupManager): untyped =
+ if not g.initialized:
+ raise newException(CatchableError, "OnchainGroupManager is not initialized")
+
+template retryWrapper(
+ g: OnchainGroupManager, res: auto, errStr: string, body: untyped
+): auto =
+ retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction):
+ body
+
+method validateRoot*(g: OnchainGroupManager, root: MerkleNode): bool =
+ if g.validRoots.find(root) >= 0:
+ return true
+ return false
+
+proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
+ let rootRes = await g.fetchMerkleRoot()
+ if rootRes.isErr():
+ return false
+
+ let merkleRoot = UInt256ToField(rootRes.get())
+ if g.validRoots.len == 0:
+ g.validRoots.addLast(merkleRoot)
+ return true
+
+ if g.validRoots[g.validRoots.len - 1] != merkleRoot:
+ if g.validRoots.len > AcceptableRootWindowSize:
+ discard g.validRoots.popFirst()
+ g.validRoots.addLast(merkleRoot)
+ return true
+
+ return false
+
+proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} =
+ try:
+ initializedGuard(g)
+ let ethRpc = g.ethRpc.get()
+ let wakuRlnContract = g.wakuRlnContract.get()
+
+ const rpcDelay = 5.seconds
+
+ while true:
+ let rootUpdated = await g.updateRoots()
+
+ if rootUpdated:
+ if g.membershipIndex.isNone():
+ error "membershipIndex is not set; skipping proof update"
+ else:
+ let proofResult = await g.fetchMerkleProofElements()
+ if proofResult.isErr():
+ error "Failed to fetch Merkle proof", error = proofResult.error
+ g.merkleProofCache = proofResult.get()
+
+ # also need update registerd membership
+ let memberCount = cast[int64](await wakuRlnContract.commitmentIndex().call())
+ waku_rln_number_registered_memberships.set(float64(memberCount))
+
+ await sleepAsync(rpcDelay)
+ except CatchableError:
+ error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg()
method register*(
g: OnchainGroupManager, rateCommitment: RateCommitment
@@ -147,18 +198,14 @@ method register*(
try:
let leaf = rateCommitment.toLeaf().get()
- await g.registerBatch(@[leaf])
+ if g.registerCb.isSome():
+ let idx = g.latestIndex
+ debug "registering member via callback", rateCommitment = leaf, index = idx
+ await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)])
+ g.latestIndex.inc()
except CatchableError:
raise newException(ValueError, getCurrentExceptionMsg())
-method registerBatch*(
- g: OnchainGroupManager, rateCommitments: seq[RawRateCommitment]
-): Future[void] {.async: (raises: [Exception]).} =
- initializedGuard(g)
-
- await g.atomicBatch(g.latestIndex, rateCommitments)
- g.latestIndex += MembershipIndex(rateCommitments.len)
-
method register*(
g: OnchainGroupManager,
identityCredential: IdentityCredential,
@@ -212,8 +259,19 @@ method register*(
debug "parsed membershipIndex", membershipIndex
g.userMessageLimit = some(userMessageLimit)
g.membershipIndex = some(membershipIndex.toMembershipIndex())
+ g.idCredentials = some(identityCredential)
+
+ let rateCommitment = RateCommitment(
+ idCommitment: identityCredential.idCommitment, userMessageLimit: userMessageLimit
+ )
+ .toLeaf()
+ .get()
+
+ if g.registerCb.isSome():
+ let member = Membership(rateCommitment: rateCommitment, index: g.latestIndex)
+ await g.registerCb.get()(@[member])
+ g.latestIndex.inc()
- # don't handle member insertion into the tree here, it will be handled by the event listener
return
method withdraw*(
@@ -226,304 +284,170 @@ method withdrawBatch*(
): Future[void] {.async: (raises: [Exception]).} =
initializedGuard(g)
- # TODO: after slashing is enabled on the contract, use atomicBatch internally
+proc getRootFromProofAndIndex(
+ g: OnchainGroupManager, elements: seq[byte], bits: seq[byte]
+): GroupManagerResult[array[32, byte]] =
+ # this is a helper function to get root from merkle proof elements and index
+ # it's currently not used anywhere, but can be used to verify the root from the proof and index
+ # Compute leaf hash from idCommitment and messageLimit
+ let messageLimitField = uint64ToField(g.userMessageLimit.get())
+ let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField])
+ if leafHashRes.isErr():
+ return err("Failed to compute leaf hash: " & leafHashRes.error)
-proc parseEvent(
- event: type MemberRegistered, log: JsonNode
-): GroupManagerResult[Membership] =
- ## parses the `data` parameter of the `MemberRegistered` event `log`
- ## returns an error if it cannot parse the `data` parameter
- var rateCommitment: UInt256
- var index: UInt256
- var data: seq[byte]
- try:
- data = hexToSeqByte(log["data"].getStr())
- except ValueError:
- return err(
- "failed to parse the data field of the MemberRegistered event: " &
- getCurrentExceptionMsg()
- )
- var offset = 0
- try:
- # Parse the rateCommitment
- offset += decode(data, 0, offset, rateCommitment)
- # Parse the index
- offset += decode(data, 0, offset, index)
- return ok(
- Membership(
- rateCommitment: rateCommitment.toRateCommitment(),
- index: index.toMembershipIndex(),
- )
- )
- except CatchableError:
- return err("failed to parse the data field of the MemberRegistered event")
+ var hash = leafHashRes.get()
+ for i in 0 ..< bits.len:
+ let sibling = elements[i * 32 .. (i + 1) * 32 - 1]
-type BlockTable* = OrderedTable[BlockNumber, seq[(Membership, bool)]]
+ let hashRes =
+ if bits[i] == 0:
+ poseidon(@[@hash, sibling])
+ else:
+ poseidon(@[sibling, @hash])
-proc backfillRootQueue*(
- g: OnchainGroupManager, len: uint
-): Future[void] {.async: (raises: [Exception]).} =
- if len > 0:
- # backfill the tree's acceptable roots
- for i in 0 .. len - 1:
- # remove the last root
- g.validRoots.popLast()
- for i in 0 .. len - 1:
- # add the backfilled root
- g.validRoots.addLast(g.validRootBuffer.popLast())
+ hash = hashRes.valueOr:
+ return err("Failed to compute poseidon hash: " & error)
+ hash = hashRes.get()
-proc insert(
- blockTable: var BlockTable,
- blockNumber: BlockNumber,
- member: Membership,
- removed: bool,
-) =
- let memberTuple = (member, removed)
- if blockTable.hasKeyOrPut(blockNumber, @[memberTuple]):
- try:
- blockTable[blockNumber].add(memberTuple)
- except KeyError: # qed
- error "could not insert member into block table",
- blockNumber = blockNumber, member = member
+ return ok(hash)
-proc getRawEvents(
- g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[JsonNode] {.async: (raises: [Exception]).} =
- initializedGuard(g)
+method generateProof*(
+ g: OnchainGroupManager,
+ data: seq[byte],
+ epoch: Epoch,
+ messageId: MessageId,
+ rlnIdentifier = DefaultRlnIdentifier,
+): GroupManagerResult[RateLimitProof] {.gcsafe, raises: [].} =
+ ## Generates an RLN proof using the cached Merkle proof and custom witness
+ # Ensure identity credentials and membership index are set
+ if g.idCredentials.isNone():
+ return err("identity credentials are not set")
+ if g.membershipIndex.isNone():
+ return err("membership index is not set")
+ if g.userMessageLimit.isNone():
+ return err("user message limit is not set")
- let ethRpc = g.ethRpc.get()
- let wakuRlnContract = g.wakuRlnContract.get()
+ if (g.merkleProofCache.len mod 32) != 0:
+ return err("Invalid merkle proof cache length")
- var eventStrs: seq[JsonString]
- g.retryWrapper(eventStrs, "Failed to get the events"):
- await wakuRlnContract.getJsonLogs(
- MemberRegistered,
- fromBlock = Opt.some(fromBlock.blockId()),
- toBlock = Opt.some(toBlock.blockId()),
- )
+ let identity_secret = seqToField(g.idCredentials.get().idSecretHash)
+ let user_message_limit = uint64ToField(g.userMessageLimit.get())
+ let message_id = uint64ToField(messageId)
+ var path_elements = newSeq[byte](0)
- var events = newJArray()
- for eventStr in eventStrs:
- events.add(parseJson(eventStr.string))
- return events
+ let identity_path_index = uint64ToIndex(g.membershipIndex.get(), 20)
+ for i in 0 ..< g.merkleProofCache.len div 32:
+ let chunk = g.merkleProofCache[i * 32 .. (i + 1) * 32 - 1]
+ path_elements.add(chunk.reversed())
-proc getBlockTable(
- g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[BlockTable] {.async: (raises: [Exception]).} =
- initializedGuard(g)
+ let x = keccak.keccak256.digest(data)
- var blockTable = default(BlockTable)
+ let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr:
+ return err("Failed to compute external nullifier: " & error)
- let events = await g.getRawEvents(fromBlock, toBlock)
+ let witness = RLNWitnessInput(
+ identity_secret: identity_secret,
+ user_message_limit: user_message_limit,
+ message_id: message_id,
+ path_elements: path_elements,
+ identity_path_index: identity_path_index,
+ x: x,
+ external_nullifier: extNullifier,
+ )
- if events.len == 0:
- trace "no events found"
- return blockTable
+ let serializedWitness = serialize(witness)
- for event in events:
- let blockNumber = parseHexInt(event["blockNumber"].getStr()).BlockNumber
- let removed = event["removed"].getBool()
- let parsedEventRes = parseEvent(MemberRegistered, event)
- if parsedEventRes.isErr():
- error "failed to parse the MemberRegistered event", error = parsedEventRes.error()
- raise newException(ValueError, "failed to parse the MemberRegistered event")
- let parsedEvent = parsedEventRes.get()
- blockTable.insert(blockNumber, parsedEvent, removed)
+ var input_witness_buffer = toBuffer(serializedWitness)
- return blockTable
+ # Generate the proof using the zerokit API
+ var output_witness_buffer: Buffer
+ let witness_success = generate_proof_with_witness(
+ g.rlnInstance, addr input_witness_buffer, addr output_witness_buffer
+ )
-proc handleEvents(
- g: OnchainGroupManager, blockTable: BlockTable
-): Future[void] {.async: (raises: [Exception]).} =
- initializedGuard(g)
+ if not witness_success:
+ return err("Failed to generate proof")
- for blockNumber, members in blockTable.pairs():
- try:
- let startIndex = blockTable[blockNumber].filterIt(not it[1])[0][0].index
- let removalIndices = members.filterIt(it[1]).mapIt(it[0].index)
- let rateCommitments = members.mapIt(it[0].rateCommitment)
- await g.atomicBatch(
- start = startIndex,
- rateCommitments = rateCommitments,
- toRemoveIndices = removalIndices,
- )
- g.latestIndex = startIndex + MembershipIndex(rateCommitments.len)
- trace "new members added to the Merkle tree",
- commitments = rateCommitments.mapIt(it.inHex)
- except CatchableError:
- error "failed to insert members into the tree", error = getCurrentExceptionMsg()
- raise newException(ValueError, "failed to insert members into the tree")
+ # Parse the proof into a RateLimitProof object
+ var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`)
+ let proofBytes: array[320, byte] = proofValue[]
- return
+ ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
+ let
+ proofOffset = 128
+ rootOffset = proofOffset + 32
+ externalNullifierOffset = rootOffset + 32
+ shareXOffset = externalNullifierOffset + 32
+ shareYOffset = shareXOffset + 32
+ nullifierOffset = shareYOffset + 32
-proc handleRemovedEvents(
- g: OnchainGroupManager, blockTable: BlockTable
-): Future[void] {.async: (raises: [Exception]).} =
- initializedGuard(g)
+ var
+ zkproof: ZKSNARK
+ proofRoot, shareX, shareY: MerkleNode
+ externalNullifier: ExternalNullifier
+ nullifier: Nullifier
- # count number of blocks that have been removed
- var numRemovedBlocks: uint = 0
- for blockNumber, members in blockTable.pairs():
- if members.anyIt(it[1]):
- numRemovedBlocks += 1
+ discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1])
+ discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1])
+ discard
+ externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1])
+ discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1])
+ discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1])
+ discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1])
- await g.backfillRootQueue(numRemovedBlocks)
+ # Create the RateLimitProof object
+ let output = RateLimitProof(
+ proof: zkproof,
+ merkleRoot: proofRoot,
+ externalNullifier: externalNullifier,
+ epoch: epoch,
+ rlnIdentifier: rlnIdentifier,
+ shareX: shareX,
+ shareY: shareY,
+ nullifier: nullifier,
+ )
-proc getAndHandleEvents(
- g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber
-): Future[bool] {.async: (raises: [Exception]).} =
- initializedGuard(g)
- let blockTable = await g.getBlockTable(fromBlock, toBlock)
- try:
- await g.handleEvents(blockTable)
- await g.handleRemovedEvents(blockTable)
- except CatchableError:
- error "failed to handle events", error = getCurrentExceptionMsg()
- raise newException(ValueError, "failed to handle events")
+ debug "Proof generated successfully"
- g.latestProcessedBlock = toBlock
- return true
+ waku_rln_remaining_proofs_per_epoch.dec()
+ waku_rln_total_generated_proofs.inc()
+ return ok(output)
-proc runInInterval(g: OnchainGroupManager, cb: proc, interval: Duration) =
- g.blockFetchingActive = false
+method verifyProof*(
+ g: OnchainGroupManager, # verifier context
+ input: seq[byte], # raw message data (signal)
+ proof: RateLimitProof, # proof received from the peer
+): GroupManagerResult[bool] {.gcsafe, raises: [].} =
+ ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots --
- proc runIntervalLoop() {.async, gcsafe.} =
- g.blockFetchingActive = true
+ var normalizedProof = proof
- while g.blockFetchingActive:
- var retCb: bool
- g.retryWrapper(retCb, "Failed to run the interval block fetching loop"):
- await cb()
- await sleepAsync(interval)
+ normalizedProof.externalNullifier = poseidon(
+ @[@(proof.epoch), @(proof.rlnIdentifier)]
+ ).valueOr:
+ return err("Failed to compute external nullifier: " & error)
- # using asyncSpawn is OK here since
- # we make use of the error handling provided by
- # OnFatalErrorHandler
- asyncSpawn runIntervalLoop()
+ let proofBytes = serialize(normalizedProof, input)
+ let proofBuffer = proofBytes.toBuffer()
-proc getNewBlockCallback(g: OnchainGroupManager): proc =
- let ethRpc = g.ethRpc.get()
- proc wrappedCb(): Future[bool] {.async, gcsafe.} =
- var latestBlock: BlockNumber
- g.retryWrapper(latestBlock, "Failed to get the latest block number"):
- cast[BlockNumber](await ethRpc.provider.eth_blockNumber())
+ let rootsBytes = serialize(g.validRoots.items().toSeq())
+ let rootsBuffer = rootsBytes.toBuffer()
- if latestBlock <= g.latestProcessedBlock:
- return
- # get logs from the last block
- # inc by 1 to prevent double processing
- let fromBlock = g.latestProcessedBlock + 1
- var handleBlockRes: bool
- g.retryWrapper(handleBlockRes, "Failed to handle new block"):
- await g.getAndHandleEvents(fromBlock, latestBlock)
+ var validProof: bool # out-param
+ let ffiOk = verify_with_roots(
+ g.rlnInstance, # RLN context created at init()
+ addr proofBuffer, # (proof + signal)
+ addr rootsBuffer, # valid Merkle roots
+ addr validProof # will be set by the FFI call
+ ,
+ )
- # cannot use isOkOr here because results in a compile-time error that
- # shows the error is void for some reason
- let setMetadataRes = g.setMetadata()
- if setMetadataRes.isErr():
- error "failed to persist rln metadata", error = setMetadataRes.error
+ if not ffiOk:
+ return err("could not verify the proof")
+ else:
+ trace "Proof verified successfully !"
- return handleBlockRes
-
- return wrappedCb
-
-proc startListeningToEvents(
- g: OnchainGroupManager
-): Future[void] {.async: (raises: [Exception]).} =
- initializedGuard(g)
-
- let ethRpc = g.ethRpc.get()
- let newBlockCallback = g.getNewBlockCallback()
- g.runInInterval(newBlockCallback, DefaultBlockPollRate)
-
-proc batchAwaitBlockHandlingFuture(
- g: OnchainGroupManager, futs: seq[Future[bool]]
-): Future[void] {.async: (raises: [Exception]).} =
- for fut in futs:
- try:
- var handleBlockRes: bool
- g.retryWrapper(handleBlockRes, "Failed to handle block"):
- await fut
- except CatchableError:
- raise newException(
- CatchableError, "could not fetch events from block: " & getCurrentExceptionMsg()
- )
-
-proc startOnchainSync(
- g: OnchainGroupManager
-): Future[void] {.async: (raises: [Exception]).} =
- initializedGuard(g)
-
- let ethRpc = g.ethRpc.get()
-
- # static block chunk size
- let blockChunkSize = 2_000.BlockNumber
- # delay between rpc calls to not overload the rate limit
- let rpcDelay = 200.milliseconds
- # max number of futures to run concurrently
- let maxFutures = 10
-
- var fromBlock: BlockNumber =
- if g.latestProcessedBlock > g.rlnContractDeployedBlockNumber:
- info "syncing from last processed block", blockNumber = g.latestProcessedBlock
- g.latestProcessedBlock + 1
- else:
- info "syncing from rln contract deployed block",
- blockNumber = g.rlnContractDeployedBlockNumber
- g.rlnContractDeployedBlockNumber
-
- var futs = newSeq[Future[bool]]()
- var currentLatestBlock: BlockNumber
- g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"):
- cast[BlockNumber](await ethRpc.provider.eth_blockNumber())
-
- try:
- # we always want to sync from last processed block => latest
- # chunk events
- while true:
- # if the fromBlock is less than 2k blocks behind the current block
- # then fetch the new toBlock
- if fromBlock >= currentLatestBlock:
- break
-
- if fromBlock + blockChunkSize > currentLatestBlock:
- g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"):
- cast[BlockNumber](await ethRpc.provider.eth_blockNumber())
-
- let toBlock = min(fromBlock + blockChunkSize, currentLatestBlock)
- debug "fetching events", fromBlock = fromBlock, toBlock = toBlock
- await sleepAsync(rpcDelay)
- futs.add(g.getAndHandleEvents(fromBlock, toBlock))
- if futs.len >= maxFutures or toBlock == currentLatestBlock:
- await g.batchAwaitBlockHandlingFuture(futs)
- g.setMetadata(lastProcessedBlock = some(toBlock)).isOkOr:
- error "failed to persist rln metadata", error = $error
- futs = newSeq[Future[bool]]()
- fromBlock = toBlock + 1
- except CatchableError:
- raise newException(
- CatchableError,
- "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg(),
- )
-
- # listen to blockheaders and contract events
- try:
- await g.startListeningToEvents()
- except CatchableError:
- raise newException(
- ValueError, "failed to start listening to events: " & getCurrentExceptionMsg()
- )
-
-method startGroupSync*(
- g: OnchainGroupManager
-): Future[GroupManagerResult[void]] {.async.} =
- ?resultifiedInitGuard(g)
- # Get archive history
- try:
- await startOnchainSync(g)
- return ok()
- except CatchableError, Exception:
- return err("failed to start group sync: " & getCurrentExceptionMsg())
+ return ok(validProof)
method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
g.registerCb = some(cb)
@@ -531,11 +455,35 @@ method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} =
method onWithdraw*(g: OnchainGroupManager, cb: OnWithdrawCallback) {.gcsafe.} =
g.withdrawCb = some(cb)
+proc establishConnection(
+ g: OnchainGroupManager
+): Future[GroupManagerResult[Web3]] {.async.} =
+ var ethRpc: Web3
+
+ g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"):
+ var innerEthRpc: Web3
+ var connected = false
+ for clientUrl in g.ethClientUrls:
+      ## The user may configure multiple Ethereum client URLs;
+      ## try connecting to each of them in turn until one succeeds
+ try:
+ innerEthRpc = await newWeb3(clientUrl)
+ connected = true
+ break
+ except CatchableError:
+ error "failed connect Eth client", error = getCurrentExceptionMsg()
+
+ if not connected:
+ raise newException(CatchableError, "all failed")
+
+ innerEthRpc
+
+ return ok(ethRpc)
+
method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} =
# check if the Ethereum client is reachable
- var ethRpc: Web3
- g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"):
- await newWeb3(g.ethClientUrl)
+ let ethRpc: Web3 = (await establishConnection(g)).valueOr:
+ return err("failed to connect to Ethereum clients: " & $error)
var fetchedChainId: uint
g.retryWrapper(fetchedChainId, "Failed to get the chain id"):
@@ -609,53 +557,29 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
let metadata = metadataGetOptRes.get().get()
if metadata.chainId != uint(g.chainId):
return err("persisted data: chain id mismatch")
-
if metadata.contractAddress != g.ethContractAddress.toLower():
return err("persisted data: contract address mismatch")
- g.latestProcessedBlock = metadata.lastProcessedBlock.BlockNumber
- g.validRoots = metadata.validRoots.toDeque()
- var deployedBlockNumber: Uint256
- g.retryWrapper(
- deployedBlockNumber,
- "Failed to get the deployed block number. Have you set the correct contract address?",
- ):
- await wakuRlnContract.deployedBlockNumber().call()
- debug "using rln contract", deployedBlockNumber, rlnContractAddress = contractAddress
- g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber)
- g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
g.rlnRelayMaxMessageLimit =
cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call())
proc onDisconnect() {.async.} =
error "Ethereum client disconnected"
- let fromBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber)
- info "reconnecting with the Ethereum client, and restarting group sync",
- fromBlock = fromBlock
- var newEthRpc: Web3
- g.retryWrapper(newEthRpc, "Failed to reconnect with the Ethereum client"):
- await newWeb3(g.ethClientUrl)
+
+ var newEthRpc: Web3 = (await g.establishConnection()).valueOr:
+ g.onFatalErrorAction("failed to connect to Ethereum clients onDisconnect")
+ return
+
newEthRpc.ondisconnect = ethRpc.ondisconnect
g.ethRpc = some(newEthRpc)
- try:
- await g.startOnchainSync()
- except CatchableError, Exception:
- g.onFatalErrorAction(
- "failed to restart group sync" & ": " & getCurrentExceptionMsg()
- )
-
ethRpc.ondisconnect = proc() =
asyncSpawn onDisconnect()
- waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet()))
g.initialized = true
-
return ok()
method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} =
- g.blockFetchingActive = false
-
if g.ethRpc.isSome():
g.ethRpc.get().ondisconnect = nil
await g.ethRpc.get().close()
@@ -665,26 +589,13 @@ method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} =
g.initialized = false
-proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async, gcsafe.} =
- let ethRpc = g.ethRpc.get()
-
- var syncing: SyncingStatus
- g.retryWrapper(syncing, "Failed to get the syncing status"):
- await ethRpc.provider.eth_syncing()
- return syncing.syncing
-
method isReady*(g: OnchainGroupManager): Future[bool] {.async.} =
initializedGuard(g)
if g.ethRpc.isNone():
return false
- var currentBlock: BlockNumber
- g.retryWrapper(currentBlock, "Failed to get the current block number"):
- cast[BlockNumber](await g.ethRpc.get().provider.eth_blockNumber())
-
- # the node is still able to process messages if it is behind the latest block by a factor of the valid roots
- if u256(g.latestProcessedBlock.uint64) < (u256(currentBlock) - u256(g.validRoots.len)):
+ if g.wakuRlnContract.isNone():
return false
- return not (await g.isSyncing())
+ return true
diff --git a/waku/waku_rln_relay/protocol_metrics.nim b/waku/waku_rln_relay/protocol_metrics.nim
index 121727809..2210328f4 100644
--- a/waku/waku_rln_relay/protocol_metrics.nim
+++ b/waku/waku_rln_relay/protocol_metrics.nim
@@ -85,6 +85,7 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
var cumulativeProofsVerified = 0.float64
var cumulativeProofsGenerated = 0.float64
var cumulativeProofsRemaining = 100.float64
+ var cumulativeRegisteredMember = 0.float64
when defined(metrics):
logMetrics = proc() =
@@ -107,6 +108,9 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
let freshProofsRemainingCount = parseAndAccumulate(
waku_rln_remaining_proofs_per_epoch, cumulativeProofsRemaining
)
+ let freshRegisteredMemberCount = parseAndAccumulate(
+ waku_rln_number_registered_memberships, cumulativeRegisteredMember
+ )
info "Total messages", count = freshMsgCount
info "Total spam messages", count = freshSpamCount
@@ -116,5 +120,6 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger =
info "Total proofs verified", count = freshProofsVerifiedCount
info "Total proofs generated", count = freshProofsGeneratedCount
info "Total proofs remaining", count = freshProofsRemainingCount
+ info "Total registered members", count = freshRegisteredMemberCount
return logMetrics
diff --git a/waku/waku_rln_relay/protocol_types.nim b/waku/waku_rln_relay/protocol_types.nim
index 97b1c34ea..c6f52e00b 100644
--- a/waku/waku_rln_relay/protocol_types.nim
+++ b/waku/waku_rln_relay/protocol_types.nim
@@ -52,6 +52,20 @@ type RateLimitProof* = object
## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier]))
externalNullifier*: ExternalNullifier
+type UInt40* = StUint[40]
+type UInt32* = StUint[32]
+
+type
+ Field = array[32, byte] # Field element representation (256 bits)
+ RLNWitnessInput* = object
+ identity_secret*: Field
+ user_message_limit*: Field
+ message_id*: Field
+ path_elements*: seq[byte]
+ identity_path_index*: seq[byte]
+ x*: Field
+ external_nullifier*: Field
+
type ProofMetadata* = object
nullifier*: Nullifier
shareX*: MerkleNode
diff --git a/waku/waku_rln_relay/rln/rln_interface.nim b/waku/waku_rln_relay/rln/rln_interface.nim
index cc468b124..27b3bbee9 100644
--- a/waku/waku_rln_relay/rln/rln_interface.nim
+++ b/waku/waku_rln_relay/rln/rln_interface.nim
@@ -130,6 +130,21 @@ proc generate_proof*(
## integers wrapped in <> indicate value sizes in bytes
## the return bool value indicates the success or failure of the operation
+proc generate_proof_with_witness*(
+ ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer
+): bool {.importc: "generate_rln_proof_with_witness".}
+
+## rln-v2
+## "witness" term refer to collection of secret inputs with proper serialization
+## input_buffer has to be serialized as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements<32*n> | identity_path_index<n> | x<32> | external_nullifier<32> ]
+## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
+## rln-v1
+## input_buffer has to be serialized as [ id_key<32> | path_elements<32*n> | identity_path_index<n> | x<32> | epoch<32> | rln_identifier<32> ]
+## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ]
+## integers wrapped in <> indicate value sizes in bytes
+## path_elements and identity_path_index serialize a merkle proof and are vectors of elements of 32 and 1 bytes respectively
+## the return bool value indicates the success or failure of the operation
+
proc verify*(
ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool
): bool {.importc: "verify_rln_proof".}
diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim
index c3f3903f9..5dae3bd51 100644
--- a/waku/waku_rln_relay/rln_relay.nim
+++ b/waku/waku_rln_relay/rln_relay.nim
@@ -32,18 +32,26 @@ import
logScope:
topics = "waku rln_relay"
-type WakuRlnConfig* = object
- rlnRelayDynamic*: bool
- rlnRelayCredIndex*: Option[uint]
- rlnRelayEthContractAddress*: string
- rlnRelayEthClientAddress*: string
- rlnRelayChainId*: uint
- rlnRelayCredPath*: string
- rlnRelayCredPassword*: string
- rlnRelayTreePath*: string
- rlnEpochSizeSec*: uint64
+type RlnRelayCreds* {.requiresInit.} = object
+ path*: string
+ password*: string
+
+type RlnRelayConf* = object of RootObj
+ # TODO: severals parameters are only needed when it's dynamic
+ # change the config to either nest or use enum/type variant so it's obvious
+ # and then it can be set to `requiresInit`
+ dynamic*: bool
+ credIndex*: Option[uint]
+ ethContractAddress*: string
+ ethClientUrls*: seq[string]
+ chainId*: uint
+ creds*: Option[RlnRelayCreds]
+ treePath*: string
+ epochSizeSec*: uint64
+ userMessageLimit*: uint64
+
+type WakuRlnConfig* = object of RlnRelayConf
onFatalErrorAction*: OnFatalErrorHandler
- rlnRelayUserMessageLimit*: uint64
proc createMembershipList*(
rln: ptr RLN, n: int
@@ -90,6 +98,7 @@ type WakuRLNRelay* = ref object of RootObj
onFatalErrorAction*: OnFatalErrorHandler
nonceManager*: NonceManager
epochMonitorFuture*: Future[void]
+ rootChangesFuture*: Future[void]
proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
## gets time `t` as `flaot64` with subseconds resolution in the fractional part
@@ -184,7 +193,7 @@ proc absDiff*(e1, e2: Epoch): uint64 =
return epoch2 - epoch1
proc validateMessage*(
- rlnPeer: WakuRLNRelay, msg: WakuMessage, timeOption = none(float64)
+ rlnPeer: WakuRLNRelay, msg: WakuMessage
): MessageValidationResult =
## validate the supplied `msg` based on the waku-rln-relay routing protocol i.e.,
## the `msg`'s epoch is within MaxEpochGap of the current epoch
@@ -204,12 +213,8 @@ proc validateMessage*(
# checks if the `msg`'s epoch is far from the current epoch
# it corresponds to the validation of rln external nullifier
- var epoch: Epoch
- if timeOption.isSome():
- epoch = rlnPeer.calcEpoch(timeOption.get())
- else:
- # get current rln epoch
- epoch = rlnPeer.getCurrentEpoch()
+ # get current rln epoch
+ let epoch: Epoch = rlnPeer.getCurrentEpoch()
let
msgEpoch = proof.epoch
@@ -248,6 +253,7 @@ proc validateMessage*(
waku_rln_errors_total.inc(labelValues = ["proof_verification"])
warn "invalid message: proof verification failed", payloadLen = msg.payload.len
return MessageValidationResult.Invalid
+
if not proofVerificationRes.value():
# invalid proof
warn "invalid message: invalid proof", payloadLen = msg.payload.len
@@ -273,12 +279,12 @@ proc validateMessage*(
return MessageValidationResult.Valid
proc validateMessageAndUpdateLog*(
- rlnPeer: WakuRLNRelay, msg: WakuMessage, timeOption = none(float64)
+ rlnPeer: WakuRLNRelay, msg: WakuMessage
): MessageValidationResult =
## validates the message and updates the log to prevent double messaging
## in future messages
- let isValidMessage = rlnPeer.validateMessage(msg, timeOption)
+ let isValidMessage = rlnPeer.validateMessage(msg)
let decodeRes = RateLimitProof.init(msg.proof)
if decodeRes.isErr():
@@ -408,9 +414,12 @@ proc generateRlnValidator*(
proc monitorEpochs(wakuRlnRelay: WakuRLNRelay) {.async.} =
while true:
try:
- waku_rln_remaining_proofs_per_epoch.set(
- wakuRlnRelay.groupManager.userMessageLimit.get().float64
- )
+ if wakuRlnRelay.groupManager.userMessageLimit.isSome():
+ waku_rln_remaining_proofs_per_epoch.set(
+ wakuRlnRelay.groupManager.userMessageLimit.get().float64
+ )
+ else:
+ error "userMessageLimit is not set in monitorEpochs"
except CatchableError:
error "Error in epoch monitoring", error = getCurrentExceptionMsg()
@@ -425,10 +434,10 @@ proc mount(
groupManager: GroupManager
wakuRlnRelay: WakuRLNRelay
# create an RLN instance
- let rlnInstance = createRLNInstance(tree_path = conf.rlnRelayTreePath).valueOr:
+ let rlnInstance = createRLNInstance(tree_path = conf.treePath).valueOr:
return err("could not create RLN instance: " & $error)
- if not conf.rlnRelayDynamic:
+ if not conf.dynamic:
# static setup
let parsedGroupKeys = StaticGroupKeys.toIdentityCredentials().valueOr:
return err("could not parse static group keys: " & $error)
@@ -436,50 +445,48 @@ proc mount(
groupManager = StaticGroupManager(
groupSize: StaticGroupSize,
groupKeys: parsedGroupKeys,
- membershipIndex: conf.rlnRelayCredIndex,
+ membershipIndex: conf.credIndex,
rlnInstance: rlnInstance,
onFatalErrorAction: conf.onFatalErrorAction,
)
# we don't persist credentials in static mode since they exist in ./constants.nim
else:
- # dynamic setup
- proc useValueOrNone(s: string): Option[string] =
- if s == "":
- none(string)
+ let (rlnRelayCredPath, rlnRelayCredPassword) =
+ if conf.creds.isSome:
+ (some(conf.creds.get().path), some(conf.creds.get().password))
else:
- some(s)
+ (none(string), none(string))
- let
- rlnRelayCredPath = useValueOrNone(conf.rlnRelayCredPath)
- rlnRelayCredPassword = useValueOrNone(conf.rlnRelayCredPassword)
groupManager = OnchainGroupManager(
- ethClientUrl: string(conf.rlnRelayethClientAddress),
- ethContractAddress: $conf.rlnRelayEthContractAddress,
- chainId: conf.rlnRelayChainId,
+ userMessageLimit: some(conf.userMessageLimit),
+ ethClientUrls: conf.ethClientUrls,
+ ethContractAddress: $conf.ethContractAddress,
+ chainId: conf.chainId,
rlnInstance: rlnInstance,
registrationHandler: registrationHandler,
keystorePath: rlnRelayCredPath,
keystorePassword: rlnRelayCredPassword,
- membershipIndex: conf.rlnRelayCredIndex,
+ membershipIndex: conf.credIndex,
onFatalErrorAction: conf.onFatalErrorAction,
)
# Initialize the groupManager
(await groupManager.init()).isOkOr:
return err("could not initialize the group manager: " & $error)
- # Start the group sync
- (await groupManager.startGroupSync()).isOkOr:
- return err("could not start the group sync: " & $error)
wakuRlnRelay = WakuRLNRelay(
groupManager: groupManager,
- nonceManager:
- NonceManager.init(conf.rlnRelayUserMessageLimit, conf.rlnEpochSizeSec.float),
- rlnEpochSizeSec: conf.rlnEpochSizeSec,
- rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1),
+ nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float),
+ rlnEpochSizeSec: conf.epochSizeSec,
+ rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1),
onFatalErrorAction: conf.onFatalErrorAction,
)
+ # track root changes on smart contract merkle tree
+ if groupManager of OnchainGroupManager:
+ let onchainManager = cast[OnchainGroupManager](groupManager)
+ wakuRlnRelay.rootChangesFuture = onchainManager.trackRootChanges()
+
# Start epoch monitoring in the background
wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay)
return ok(wakuRlnRelay)
diff --git a/waku/waku_store_sync/protocols_metrics.nim b/waku/waku_store_sync/protocols_metrics.nim
index 2d2776674..53595f931 100644
--- a/waku/waku_store_sync/protocols_metrics.nim
+++ b/waku/waku_store_sync/protocols_metrics.nim
@@ -8,10 +8,17 @@ const
declarePublicHistogram reconciliation_roundtrips,
"the nubmer of roundtrips for each reconciliation",
- buckets = [0.0, 1.0, 2.0, 3.0, 5.0, 10.0, Inf]
+ buckets = [1.0, 2.0, 3.0, 5.0, 8.0, 13.0, Inf]
-declarePublicSummary total_bytes_exchanged,
+declarePublicHistogram reconciliation_differences,
+  "the number of differences for each reconciliation",
+ buckets = [0.0, 10.0, 50.0, 100.0, 500.0, 1000.0, 5000.0, Inf]
+
+declarePublicCounter total_bytes_exchanged,
"the number of bytes sent and received by the protocols", ["protocol", "direction"]
declarePublicCounter total_transfer_messages_exchanged,
"the number of messages sent and received by the transfer protocol", ["direction"]
+
+declarePublicGauge total_messages_cached,
+  "the number of messages cached by the node after pruning"
diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim
index 80c025140..d9912a3df 100644
--- a/waku/waku_store_sync/reconciliation.nim
+++ b/waku/waku_store_sync/reconciliation.nim
@@ -46,13 +46,10 @@ type SyncReconciliation* = ref object of LPProtocol
storage: SyncStorage
- # Receive IDs from transfer protocol for storage
+ # AsyncQueues are used as communication channels between
+ # reconciliation and transfer protocols.
idsRx: AsyncQueue[SyncID]
-
- # Send Hashes to transfer protocol for reception
- localWantsTx: AsyncQueue[(PeerId, WakuMessageHash)]
-
- # Send Hashes to transfer protocol for transmission
+ localWantsTx: AsyncQueue[PeerId]
remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)]
# params
@@ -96,19 +93,26 @@ proc messageIngress*(self: SyncReconciliation, id: SyncID) =
proc processRequest(
self: SyncReconciliation, conn: Connection
): Future[Result[void, string]] {.async.} =
- var roundTrips = 0
+ var
+ roundTrips = 0
+ diffs = 0
+
+ # Signal to transfer protocol that this reconciliation is starting
+ await self.localWantsTx.addLast(conn.peerId)
while true:
let readRes = catch:
await conn.readLp(int.high)
let buffer: seq[byte] = readRes.valueOr:
- return err("connection read error: " & error.msg)
+ await conn.close()
+ return err("remote " & $conn.peerId & " connection read error: " & error.msg)
- total_bytes_exchanged.observe(buffer.len, labelValues = [Reconciliation, Receiving])
+ total_bytes_exchanged.inc(buffer.len, labelValues = [Reconciliation, Receiving])
let recvPayload = RangesData.deltaDecode(buffer).valueOr:
- return err("payload decoding error: " & error)
+ await conn.close()
+ return err("remote " & $conn.peerId & " payload decoding error: " & error)
roundTrips.inc()
@@ -136,21 +140,22 @@ proc processRequest(
for hash in hashToSend:
self.remoteNeedsTx.addLastNoWait((conn.peerId, hash))
+ diffs.inc()
for hash in hashToRecv:
- self.localWantsTx.addLastNoWait((conn.peerId, hash))
+ diffs.inc()
rawPayload = sendPayload.deltaEncode()
- total_bytes_exchanged.observe(
- rawPayload.len, labelValues = [Reconciliation, Sending]
- )
+ total_bytes_exchanged.inc(rawPayload.len, labelValues = [Reconciliation, Sending])
let writeRes = catch:
await conn.writeLP(rawPayload)
if writeRes.isErr():
- return err("connection write error: " & writeRes.error.msg)
+ await conn.close()
+ return
+ err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
trace "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
@@ -162,7 +167,11 @@ proc processRequest(
continue
+ # Signal to transfer protocol that this reconciliation is done
+ await self.localWantsTx.addLast(conn.peerId)
+
reconciliation_roundtrips.observe(roundTrips)
+ reconciliation_differences.observe(diffs)
await conn.close()
@@ -188,20 +197,21 @@ proc initiate(
let sendPayload = initPayload.deltaEncode()
- total_bytes_exchanged.observe(
- sendPayload.len, labelValues = [Reconciliation, Sending]
- )
+ total_bytes_exchanged.inc(sendPayload.len, labelValues = [Reconciliation, Sending])
let writeRes = catch:
await connection.writeLP(sendPayload)
if writeRes.isErr():
- return err("connection write error: " & writeRes.error.msg)
+ await connection.close()
+ return err(
+ "remote " & $connection.peerId & " connection write error: " & writeRes.error.msg
+ )
trace "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
remote = connection.peerId,
- payload = sendPayload
+ payload = initPayload
?await self.processRequest(connection)
@@ -217,7 +227,7 @@ proc storeSynchronization*(
let connOpt = await self.peerManager.dialPeer(peer, WakuReconciliationCodec)
let conn: Connection = connOpt.valueOr:
- return err("cannot establish sync connection")
+ return err("fail to dial remote " & $peer.peerId)
debug "sync session initialized",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
@@ -288,7 +298,7 @@ proc new*(
syncInterval: timer.Duration = DefaultSyncInterval,
relayJitter: timer.Duration = DefaultGossipSubJitter,
idsRx: AsyncQueue[SyncID],
- localWantsTx: AsyncQueue[(PeerId, WakuMessageHash)],
+ localWantsTx: AsyncQueue[PeerId],
remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)],
): Future[Result[T, string]] {.async.} =
let res = await initFillStorage(syncRange, wakuArchive)
@@ -354,6 +364,8 @@ proc periodicPrune(self: SyncReconciliation) {.async.} =
let count = self.storage.prune(time)
+ total_messages_cached.set(self.storage.length())
+
debug "periodic prune done", elements_pruned = count
proc idsReceiverLoop(self: SyncReconciliation) {.async.} =
diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim
index 0ac959de0..c1e5d3e37 100644
--- a/waku/waku_store_sync/transfer.nim
+++ b/waku/waku_store_sync/transfer.nim
@@ -37,9 +37,9 @@ type SyncTransfer* = ref object of LPProtocol
idsTx: AsyncQueue[SyncID]
# Receive Hashes from reconciliation protocol for reception
- localWantsRx: AsyncQueue[(PeerId, WakuMessageHash)]
+ localWantsRx: AsyncQueue[PeerId]
localWantsRxFut: Future[void]
- inSessions: Table[PeerId, HashSet[WakuMessageHash]]
+ inSessions: HashSet[PeerId]
# Receive Hashes from reconciliation protocol for transmission
remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)]
@@ -51,13 +51,14 @@ proc sendMessage(
): Future[Result[void, string]] {.async.} =
let rawPayload = payload.encode().buffer
- total_bytes_exchanged.observe(rawPayload.len, labelValues = [Transfer, Sending])
+ total_bytes_exchanged.inc(rawPayload.len, labelValues = [Transfer, Sending])
let writeRes = catch:
await conn.writeLP(rawPayload)
if writeRes.isErr():
- return err("connection write error: " & writeRes.error.msg)
+ return
+ err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
total_transfer_messages_exchanged.inc(labelValues = [Sending])
@@ -69,7 +70,7 @@ proc openConnection(
let connOpt = await self.peerManager.dialPeer(peerId, WakuTransferCodec)
let conn: Connection = connOpt.valueOr:
- return err("Cannot establish transfer connection")
+ return err("fail to dial remote " & $peerId)
debug "transfer session initialized",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
@@ -77,19 +78,14 @@ proc openConnection(
return ok(conn)
proc wantsReceiverLoop(self: SyncTransfer) {.async.} =
- ## Waits for message hashes,
- ## store the peers and hashes locally as
- ## "supposed to be received"
+ ## Waits for peer ids of nodes
+ ## we are reconciliating with
while true: # infinite loop
- let (peerId, fingerprint) = await self.localWantsRx.popFirst()
+ let peerId = await self.localWantsRx.popFirst()
- self.inSessions.withValue(peerId, value):
- value[].incl(fingerprint)
- do:
- var hashes = initHashSet[WakuMessageHash]()
- hashes.incl(fingerprint)
- self.inSessions[peerId] = hashes
+ if self.inSessions.containsOrIncl(peerId):
+ self.inSessions.excl(peerId)
return
@@ -126,6 +122,8 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
WakuMessageAndTopic(pubsub: response.topics[0], message: response.messages[0])
(await sendMessage(connection, msg)).isOkOr:
+ self.outSessions.del(peerId)
+ await connection.close()
error "failed to send message", error = error
continue
@@ -134,6 +132,10 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
proc initProtocolHandler(self: SyncTransfer) =
let handler = proc(conn: Connection, proto: string) {.async, closure.} =
while true:
+ if not self.inSessions.contains(conn.peerId):
+ error "unwanted peer, disconnecting", remote = conn.peerId
+ break
+
let readRes = catch:
await conn.readLp(int64(DefaultMaxWakuMessageSize))
@@ -141,7 +143,7 @@ proc initProtocolHandler(self: SyncTransfer) =
# connection closed normally
break
- total_bytes_exchanged.observe(buffer.len, labelValues = [Transfer, Receiving])
+ total_bytes_exchanged.inc(buffer.len, labelValues = [Transfer, Receiving])
let payload = WakuMessageAndTopic.decode(buffer).valueOr:
error "decoding error", error = $error
@@ -154,21 +156,10 @@ proc initProtocolHandler(self: SyncTransfer) =
let hash = computeMessageHash(pubsub, msg)
- self.inSessions.withValue(conn.peerId, value):
- if value[].missingOrExcl(hash):
- error "unwanted hash received, disconnecting"
- self.inSessions.del(conn.peerId)
- await conn.close()
- break
- do:
- error "unwanted hash received, disconnecting"
- self.inSessions.del(conn.peerId)
- await conn.close()
- break
-
#TODO verify msg RLN proof...
(await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr:
+ error "failed to archive message", error = $error
continue
let id = SyncID(time: msg.timestamp, hash: hash)
@@ -176,6 +167,8 @@ proc initProtocolHandler(self: SyncTransfer) =
continue
+ await conn.close()
+
debug "transfer session ended",
local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId
@@ -189,7 +182,7 @@ proc new*(
peerManager: PeerManager,
wakuArchive: WakuArchive,
idsTx: AsyncQueue[SyncID],
- localWantsRx: AsyncQueue[(PeerId, WakuMessageHash)],
+ localWantsRx: AsyncQueue[PeerId],
remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)],
): T =
var transfer = SyncTransfer(