From 39e65dea28ad89309f4c3a74d2c205334ad9d2b5 Mon Sep 17 00:00:00 2001
From: Darshan K <35736874+darshankabariya@users.noreply.github.com>
Date: Mon, 26 May 2025 17:56:29 +0530
Subject: [PATCH 01/47] fix: timestamp based validation (#3406)
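In short: validateMessage in waku/waku_rln_relay/rln_relay.nim now validates the message timestamp against the local clock and derives the expected epoch from it, instead of comparing epochs directly. A condensed sketch of the new rule (all identifiers taken from the diff below):

    let currentTime = getTime().toUnixFloat()
    let messageTime = msg.timestamp.float64 / 1e9  # WakuMessage timestamps are in nanoseconds
    if uint64(abs(currentTime - messageTime)) > rlnPeer.rlnMaxTimestampGap:
      return MessageValidationResult.Invalid       # timestamp too far from the local clock
    if proof.epoch != rlnPeer.calcEpoch(messageTime):
      return MessageValidationResult.Invalid       # proof epoch must be derived from the timestamp

The timestamp is also mixed into the RLN signal (toRLNSignal), and rlnMaxTimestampGap defaults to MaxClockGapSeconds.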
---
tests/waku_rln_relay/test_waku_rln_relay.nim | 85 +++++++++++++++----
.../test_wakunode_rln_relay.nim | 63 ++++++++++----
tests/wakunode_rest/test_rest_relay.nim | 4 +-
waku/waku_rln_relay/rln_relay.nim | 51 ++++++-----
4 files changed, 145 insertions(+), 58 deletions(-)
diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim
index 907b7c1b3..e89d8e2e6 100644
--- a/tests/waku_rln_relay/test_waku_rln_relay.nim
+++ b/tests/waku_rln_relay/test_waku_rln_relay.nim
@@ -1,7 +1,7 @@
{.used.}
import
- std/[options, os, sequtils, times, tempfiles],
+ std/[options, os, sequtils, tempfiles],
stew/byteutils,
stew/shims/net as stewNet,
testutils/unittests,
@@ -17,7 +17,10 @@ import
waku_rln_relay/protocol_metrics,
waku_keystore,
],
- ./rln/waku_rln_relay_utils
+ ./rln/waku_rln_relay_utils,
+ ../testlib/[wakucore, futures, wakunode, testutils]
+
+from std/times import epochTime
suite "Waku rln relay":
test "key_gen Nim Wrappers":
@@ -686,7 +689,7 @@ suite "Waku rln relay":
# it is a duplicate
assert isDuplicate3, "duplicate should be found"
- asyncTest "validateMessageAndUpdateLog test":
+ asyncTest "validateMessageAndUpdateLog: against epoch gap":
let index = MembershipIndex(5)
let wakuRlnConfig = WakuRlnConfig(
@@ -700,27 +703,31 @@ suite "Waku rln relay":
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
raiseAssert $error
- # get the current epoch time
- let time = epochTime()
+ let time_1 = epochTime()
- # create some messages from the same peer and append rln proof to them, except wm4
var
- wm1 = WakuMessage(payload: "Valid message".toBytes())
+ # create some messages from the same peer and append rln proof to them, except wm4
+ wm1 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now())
# another message in the same epoch as wm1, it will break the messaging rate limit
- wm2 = WakuMessage(payload: "Spam".toBytes())
- # wm3 points to the next epoch
- wm3 = WakuMessage(payload: "Valid message".toBytes())
- wm4 = WakuMessage(payload: "Invalid message".toBytes())
+ wm2 = WakuMessage(payload: "Spam message".toBytes(), timestamp: now())
- wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
+ await sleepAsync(1.seconds)
+ let time_2 = epochTime()
+
+ var
+ # wm3 points to the next epoch because of the sleep
+ wm3 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now())
+ wm4 = WakuMessage(payload: "Invalid message".toBytes(), timestamp: now())
+
+ wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr:
raiseAssert $error
- wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
+ wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr:
raiseAssert $error
- wakuRlnRelay.unsafeAppendRLNProof(wm3, time + float64(wakuRlnRelay.rlnEpochSizeSec)).isOkOr:
+
+ wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr:
raiseAssert $error
# validate messages
- # validateMessage proc checks the validity of the message fields and adds it to the log (if valid)
let
msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1)
# wm2 is published within the same Epoch as wm1 and should be found as spam
@@ -736,6 +743,48 @@ suite "Waku rln relay":
msgValidate3 == MessageValidationResult.Valid
msgValidate4 == MessageValidationResult.Invalid
+ asyncTest "validateMessageAndUpdateLog: against timestamp gap":
+ let index = MembershipIndex(5)
+
+ let wakuRlnConfig = WakuRlnConfig(
+ dynamic: false,
+ credIndex: some(index),
+ userMessageLimit: 10,
+ epochSizeSec: 10,
+ treePath: genTempPath("rln_tree", "waku_rln_relay_2"),
+ )
+
+ let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
+ raiseAssert $error
+
+ # the default is 20 seconds, but we set it to 1 second here to make the test faster
+ wakuRlnRelay.rlnMaxTimestampGap = 1
+
+ var time = epochTime()
+
+ var
+ wm1 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now())
+ wm2 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now())
+
+ wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
+ raiseAssert $error
+
+ wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
+ raiseAssert $error
+
+ # the first message is valid because its timestamp matches the time used to generate the proof
+ let msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1)
+
+ # wait for 2 seconds so the gap between the current time and the message timestamp exceeds rlnMaxTimestampGap
+ await sleepAsync(2.seconds)
+
+ # the second message is invalid because its timestamp is now too far from the current time
+ let msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2)
+
+ check:
+ msgValidate1 == MessageValidationResult.Valid
+ msgValidate2 == MessageValidationResult.Invalid
+
asyncTest "validateMessageAndUpdateLog: multiple senders with same external nullifier":
let index1 = MembershipIndex(5)
let index2 = MembershipIndex(6)
@@ -766,9 +815,11 @@ suite "Waku rln relay":
# create messages from different peers and append rln proofs to them
var
- wm1 = WakuMessage(payload: "Valid message from sender 1".toBytes())
+ wm1 =
+ WakuMessage(payload: "Valid message from sender 1".toBytes(), timestamp: now())
# another message in the same epoch as wm1, it will break the messaging rate limit
- wm2 = WakuMessage(payload: "Valid message from sender 2".toBytes())
+ wm2 =
+ WakuMessage(payload: "Valid message from sender 2".toBytes(), timestamp: now())
wakuRlnRelay1.appendRLNProof(wm1, time).isOkOr:
raiseAssert $error
diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
index 3ff6923e0..7620dfc14 100644
--- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim
+++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
@@ -132,7 +132,8 @@ procSuite "WakuNode - RLN relay":
let payload = "Hello".toBytes()
# prepare the epoch
- var message = WakuMessage(payload: @payload, contentTopic: contentTopic)
+ var message =
+ WakuMessage(payload: @payload, contentTopic: contentTopic, timestamp: now())
doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk())
debug "Nodes participating in the test",
@@ -221,19 +222,25 @@ procSuite "WakuNode - RLN relay":
var messages1: seq[WakuMessage] = @[]
var messages2: seq[WakuMessage] = @[]
- let epochTime = epochTime()
+ var epochTime = epochTime()
for i in 0 ..< 3:
var message = WakuMessage(
- payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[0]
+ payload: ("Payload_" & $i).toBytes(),
+ timestamp: now(),
+ contentTopic: contentTopics[0],
)
nodes[0].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr:
raiseAssert $error
messages1.add(message)
+ epochTime = epochTime()
+
for i in 0 ..< 3:
var message = WakuMessage(
- payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[1]
+ payload: ("Payload_" & $i).toBytes(),
+ timestamp: now(),
+ contentTopic: contentTopics[1],
)
nodes[1].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr:
raiseAssert $error
@@ -364,8 +371,12 @@ procSuite "WakuNode - RLN relay":
# check the proof is generated correctly outside when block to avoid duplication
let rateLimitProof = rateLimitProofRes.get().encode().buffer
- let message =
- WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof)
+ let message = WakuMessage(
+ payload: @payload,
+ contentTopic: contentTopic,
+ proof: rateLimitProof,
+ timestamp: now(),
+ )
## node1 publishes a message with an invalid rln proof, the message is then relayed to node2 which in turn
## attempts to verify the rate limit proof and fails hence does not relay the message to node3, thus the relayHandler of node3
@@ -452,24 +463,36 @@ procSuite "WakuNode - RLN relay":
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
# get the current epoch time
- let time = epochTime()
+ let time_1 = epochTime()
+
# create some messages with rate limit proofs
var
- wm1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic)
+ wm1 = WakuMessage(
+ payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
# another message in the same epoch as wm1, it will break the messaging rate limit
- wm2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic)
+ wm2 = WakuMessage(
+ payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
# wm3 points to the next epoch
- wm3 = WakuMessage(payload: "message 3".toBytes(), contentTopic: contentTopic)
- wm4 = WakuMessage(payload: "message 4".toBytes(), contentTopic: contentTopic)
- node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr:
+ await sleepAsync(1000.millis)
+ let time_2 = epochTime()
+
+ var
+ wm3 = WakuMessage(
+ payload: "message 3".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
+ wm4 = WakuMessage(
+ payload: "message 4".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
+
+ node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr:
raiseAssert $error
- node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr:
+ node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr:
raiseAssert $error
- node3.wakuRlnRelay.unsafeAppendRLNProof(
- wm3, time + float64(node3.wakuRlnRelay.rlnEpochSizeSec)
- ).isOkOr:
+ node3.wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr:
raiseAssert $error
# relay handler for node3
@@ -700,8 +723,12 @@ procSuite "WakuNode - RLN relay":
# Given some messages with rln proofs
let time = epochTime()
var
- msg1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic)
- msg2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic)
+ msg1 = WakuMessage(
+ payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
+ msg2 = WakuMessage(
+ payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic
+ )
node1.wakuRlnRelay.unsafeAppendRLNProof(msg1, time).isOkOr:
raiseAssert $error
diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim
index 719e66b8a..8bca121e3 100644
--- a/tests/wakunode_rest/test_rest_relay.nim
+++ b/tests/wakunode_rest/test_rest_relay.nim
@@ -260,7 +260,7 @@ suite "Waku v2 Rest API - Relay":
RelayWakuMessage(
payload: base64.encode("TEST-PAYLOAD"),
contentTopic: some(DefaultContentTopic),
- timestamp: some(int64(2022)),
+ timestamp: some(now()),
),
)
@@ -488,7 +488,7 @@ suite "Waku v2 Rest API - Relay":
RelayWakuMessage(
payload: base64.encode("TEST-PAYLOAD"),
contentTopic: some(DefaultContentTopic),
- timestamp: some(int64(2022)),
+ timestamp: some(now()),
)
)
diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim
index 5dae3bd51..48b3e8d79 100644
--- a/waku/waku_rln_relay/rln_relay.nim
+++ b/waku/waku_rln_relay/rln_relay.nim
@@ -93,6 +93,7 @@ type WakuRLNRelay* = ref object of RootObj
nullifierLog*: OrderedTable[Epoch, Table[Nullifier, ProofMetadata]]
lastEpoch*: Epoch # the epoch of the last published rln message
rlnEpochSizeSec*: uint64
+ rlnMaxTimestampGap*: uint64
rlnMaxEpochGap*: uint64
groupManager*: GroupManager
onFatalErrorAction*: OnFatalErrorHandler
@@ -103,6 +104,7 @@ type WakuRLNRelay* = ref object of RootObj
proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch =
## gets time `t` as `float64` with subseconds resolution in the fractional part
## and returns its corresponding rln `Epoch` value
+
let e = uint64(t / rlnPeer.rlnEpochSizeSec.float64)
return toEpoch(e)
@@ -211,25 +213,26 @@ proc validateMessage*(
# track message count for metrics
waku_rln_messages_total.inc()
- # checks if the `msg`'s epoch is far from the current epoch
- # it corresponds to the validation of rln external nullifier
- # get current rln epoch
- let epoch: Epoch = rlnPeer.getCurrentEpoch()
+ # checks that the message's timestamp is within an acceptable range of the current time
+ let currentTime = getTime().toUnixFloat()
+ let messageTime = msg.timestamp.float64 / 1e9
- let
- msgEpoch = proof.epoch
- # calculate the gaps
- gap = absDiff(epoch, msgEpoch)
+ let timeDiff = uint64(abs(currentTime - messageTime))
- trace "epoch info", currentEpoch = fromEpoch(epoch), msgEpoch = fromEpoch(msgEpoch)
+ debug "time info",
+ currentTime = currentTime, messageTime = messageTime, msgHash = msg.hash
- # validate the epoch
- if gap > rlnPeer.rlnMaxEpochGap:
- # message's epoch is too old or too ahead
- # accept messages whose epoch is within +-MaxEpochGap from the current epoch
- warn "invalid message: epoch gap exceeds a threshold",
- gap = gap, payloadLen = msg.payload.len, msgEpoch = fromEpoch(proof.epoch)
- waku_rln_invalid_messages_total.inc(labelValues = ["invalid_epoch"])
+ if timeDiff > rlnPeer.rlnMaxTimestampGap:
+ warn "invalid message: timestamp difference exceeds threshold",
+ timeDiff = timeDiff, maxTimestampGap = rlnPeer.rlnMaxTimestampGap
+ waku_rln_invalid_messages_total.inc(labelValues = ["invalid_timestamp"])
+ return MessageValidationResult.Invalid
+
+ let computedEpoch = rlnPeer.calcEpoch(messageTime)
+ if proof.epoch != computedEpoch:
+ warn "invalid message: timestamp mismatches epoch",
+ proofEpoch = fromEpoch(proof.epoch), computedEpoch = fromEpoch(computedEpoch)
+ waku_rln_invalid_messages_total.inc(labelValues = ["timestamp_mismatch"])
return MessageValidationResult.Invalid
let rootValidationRes = rlnPeer.groupManager.validateRoot(proof.merkleRoot)
@@ -242,8 +245,9 @@ proc validateMessage*(
# verify the proof
let
- contentTopicBytes = msg.contentTopic.toBytes
- input = concat(msg.payload, contentTopicBytes)
+ contentTopicBytes = toBytes(msg.contentTopic)
+ timestampBytes = toBytes(msg.timestamp.uint64)
+ input = concat(msg.payload, contentTopicBytes, @(timestampBytes))
waku_rln_proof_verification_total.inc()
waku_rln_proof_verification_duration_seconds.nanosecondTime:
@@ -265,6 +269,8 @@ proc validateMessage*(
if proofMetadataRes.isErr():
waku_rln_errors_total.inc(labelValues = ["proof_metadata_extraction"])
return MessageValidationResult.Invalid
+
+ let msgEpoch = proof.epoch
let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get())
if hasDup.isErr():
waku_rln_errors_total.inc(labelValues = ["duplicate_check"])
@@ -305,10 +311,12 @@ proc validateMessageAndUpdateLog*(
proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] =
## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module
- ## it extracts the `contentTopic` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence
+ ## it extracts the `contentTopic`, `timestamp`, and `payload` of the supplied `wakumessage` and serializes them into a byte sequence
+
let
- contentTopicBytes = wakumessage.contentTopic.toBytes()
- output = concat(wakumessage.payload, contentTopicBytes)
+ contentTopicBytes = toBytes(wakumessage.contentTopic)
+ timestampBytes = toBytes(wakumessage.timestamp.uint64)
+ output = concat(wakumessage.payload, contentTopicBytes, @(timestampBytes))
return output
proc appendRLNProof*(
@@ -479,6 +487,7 @@ proc mount(
nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float),
rlnEpochSizeSec: conf.epochSizeSec,
rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1),
+ rlnMaxTimestampGap: uint64(MaxClockGapSeconds),
onFatalErrorAction: conf.onFatalErrorAction,
)
From 9f68c83fed3dc1351a035e50e5ee311ef881c9f4 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Mon, 26 May 2025 21:58:02 +0200
Subject: [PATCH 02/47] chore: bump dependencies for v0.36 (#3410)
* properly pass userMessageLimit to OnchainGroupManager
* waku.nimble: bump the Nim compiler to 2.2.4
* rm stew/shims/net import
* replace ValidIpAddress.init with parseIpAddress (see the sketch below)
* fix serialize for zerokit
* group_manager: separate if statements
* protocol_types: add UInt32 encoding, zero-padded up to 32 bytes
* windows build: skip libunwind build and rm the libunwind.a inclusion step
* bump nph to overcome the compilation issues with 2.2.x
* bump nim-libp2p to v1.10.1
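Many of the test changes below are the mechanical migration from stew/shims/net's ValidIpAddress.init to std/net's parseIpAddress; a minimal sketch of the pattern, using identifiers from the diff:

    import std/net
    # before (stew/shims/net): let listenIp = ValidIpAddress.init("0.0.0.0")
    let listenIp = parseIpAddress("0.0.0.0")  # std/net IpAddress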
---
.github/ISSUE_TEMPLATE/bump_dependencies.md | 13 +++--
.github/workflows/windows-build.yml | 25 ---------
apps/chat2/chat2.nim | 3 +-
apps/networkmonitor/networkmonitor_config.nim | 1 -
apps/networkmonitor/networkmonitor_utils.nim | 1 -
apps/wakucanary/wakucanary.nim | 1 -
examples/filter_subscriber.nim | 1 -
examples/lightpush_publisher.nim | 1 -
examples/publisher.nim | 1 -
examples/subscriber.nim | 1 -
examples/wakustealthcommitments/node_spec.nim | 1 -
tests/common/test_confutils_envvar.nim | 1 -
tests/common/test_enr_builder.nim | 2 +-
tests/factory/test_external_config.nim | 3 +-
.../peer_store/test_waku_peer_storage.nim | 1 -
tests/node/peer_manager/test_peer_manager.nim | 9 +---
tests/node/test_wakunode_filter.nim | 1 -
tests/node/test_wakunode_legacy_lightpush.nim | 15 +++---
tests/node/test_wakunode_legacy_store.nim | 29 +++++-----
tests/node/test_wakunode_lightpush.nim | 13 +++--
tests/node/test_wakunode_peer_exchange.nim | 1 -
tests/node/test_wakunode_peer_manager.nim | 3 +-
tests/node/test_wakunode_relay_rln.nim | 9 ++--
tests/node/test_wakunode_sharding.nim | 10 +---
tests/node/test_wakunode_store.nim | 33 +++++-------
tests/test_peer_manager.nim | 48 ++++++++---------
tests/test_relay_peer_exchange.nim | 1 -
tests/test_waku_dnsdisc.nim | 1 -
tests/test_waku_keepalive.nim | 1 -
tests/test_waku_metadata.nim | 1 -
tests/test_waku_switch.nim | 11 +++-
tests/test_wakunode.nim | 4 +-
tests/testlib/wakunode.nim | 1 -
tests/waku_core/test_published_address.nim | 2 +-
tests/waku_discv5/utils.nim | 7 +--
tests/waku_enr/test_sharding.nim | 1 -
tests/waku_enr/utils.nim | 1 -
tests/waku_relay/test_message_id.nim | 2 +-
tests/waku_relay/test_protocol.nim | 1 -
tests/waku_relay/test_wakunode_relay.nim | 1 -
tests/waku_relay/utils.nim | 1 -
tests/waku_rln_relay/rln/test_wrappers.nim | 1 -
.../test_rln_group_manager_onchain.nim | 17 +++---
.../test_rln_group_manager_static.nim | 2 +-
tests/waku_rln_relay/test_waku_rln_relay.nim | 7 ++-
.../test_wakunode_rln_relay.nim | 1 -
tests/waku_rln_relay/utils_onchain.nim | 6 ++-
tests/waku_rln_relay/utils_static.nim | 1 -
tests/waku_store/test_wakunode_store.nim | 1 -
tests/wakunode2/test_app.nim | 1 -
tests/wakunode2/test_validators.nim | 1 -
tests/wakunode_rest/test_rest_cors.nim | 1 -
tests/wakunode_rest/test_rest_debug.nim | 1 -
tests/wakunode_rest/test_rest_filter.nim | 1 -
tests/wakunode_rest/test_rest_health.nim | 1 -
tests/wakunode_rest/test_rest_lightpush.nim | 1 -
.../test_rest_lightpush_legacy.nim | 1 -
tests/wakunode_rest/test_rest_relay.nim | 3 +-
tests/wakunode_rest/test_rest_store.nim | 1 -
.../rln_keystore_generator.nim | 2 +-
vendor/nim-chronicles | 2 +-
vendor/nim-chronos | 2 +-
vendor/nim-eth | 2 +-
vendor/nim-faststreams | 2 +-
vendor/nim-json-rpc | 2 +-
vendor/nim-libbacktrace | 2 +-
vendor/nim-libp2p | 2 +-
vendor/nim-metrics | 2 +-
vendor/nim-nat-traversal | 2 +-
vendor/nim-regex | 2 +-
vendor/nim-secp256k1 | 2 +-
vendor/nim-sqlite3-abi | 2 +-
vendor/nim-stew | 2 +-
vendor/nim-taskpools | 2 +-
vendor/nim-testutils | 2 +-
vendor/nim-unittest2 | 2 +-
vendor/nim-web3 | 2 +-
vendor/nim-zlib | 2 +-
vendor/nimbus-build-system | 2 +-
vendor/nimcrypto | 2 +-
vendor/nph | 2 +-
waku.nimble | 2 +-
waku/discovery/autonat_service.nim | 2 +-
.../conf_builder/rln_relay_conf_builder.nim | 11 ++--
.../conf_builder/waku_conf_builder.nim | 5 +-
waku/factory/external_config.nim | 4 +-
waku/factory/networks_config.nim | 7 ++-
waku/node/peer_manager/peer_manager.nim | 9 +++-
waku/node/waku_node.nim | 2 +-
waku/node/waku_switch.nim | 4 +-
waku/waku_filter_v2/client.nim | 2 +-
waku/waku_filter_v2/protocol.nim | 37 ++++++++++---
waku/waku_lightpush/protocol.nim | 21 ++++++--
waku/waku_lightpush_legacy/protocol.nim | 21 ++++++--
waku/waku_metadata/protocol.nim | 17 +++---
waku/waku_peer_exchange/protocol.nim | 53 +++++++++++++------
waku/waku_relay/protocol.nim | 2 +-
waku/waku_rendezvous/protocol.nim | 2 +-
.../group_manager/on_chain/group_manager.nim | 16 +++---
waku/waku_rln_relay/protocol_types.nim | 7 +++
waku/waku_rln_relay/rln/wrappers.nim | 13 +++--
waku/waku_rln_relay/rln_relay.nim | 2 +-
waku/waku_store/protocol.nim | 11 +++-
waku/waku_store_legacy/protocol.nim | 10 +++-
waku/waku_store_sync/reconciliation.nim | 11 ++--
waku/waku_store_sync/transfer.nim | 14 +++--
106 files changed, 331 insertions(+), 309 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bump_dependencies.md b/.github/ISSUE_TEMPLATE/bump_dependencies.md
index 6c1e777a1..0413cbfd2 100644
--- a/.github/ISSUE_TEMPLATE/bump_dependencies.md
+++ b/.github/ISSUE_TEMPLATE/bump_dependencies.md
@@ -12,7 +12,6 @@ assignees: ''
Update `nwaku` "vendor" dependencies.
### Items to bump
-- [ ] negentropy
- [ ] dnsclient.nim ( update to the latest tag version )
- [ ] nim-bearssl
- [ ] nimbus-build-system
@@ -38,12 +37,12 @@ Update `nwaku` "vendor" dependencies.
- [ ] nim-sqlite3-abi ( update to the latest tag version )
- [ ] nim-stew
- [ ] nim-stint
-- [ ] nim-taskpools
-- [ ] nim-testutils
+- [ ] nim-taskpools ( update to the latest tag version )
+- [ ] nim-testutils ( update to the latest tag version )
- [ ] nim-toml-serialization
- [ ] nim-unicodedb
-- [ ] nim-unittest2
-- [ ] nim-web3
-- [ ] nim-websock
+- [ ] nim-unittest2 ( update to the latest tag version )
+- [ ] nim-web3 ( update to the latest tag version )
+- [ ] nim-websock ( update to the latest tag version )
- [ ] nim-zlib
-- [ ] zerokit ( this should be kept in version `v0.5.1` )
+- [ ] zerokit ( this should be kept in version `v0.7.0` )
diff --git a/.github/workflows/windows-build.yml b/.github/workflows/windows-build.yml
index 3ac6ce15d..0582d5fd1 100644
--- a/.github/workflows/windows-build.yml
+++ b/.github/workflows/windows-build.yml
@@ -68,28 +68,6 @@ jobs:
./build_all.bat
cd ../../../..
- - name: Building libunwind
- run: |
- cd vendor/nim-libbacktrace
- mkdir -p vendor/libunwind/build
- pushd vendor/libunwind
-
- cmake -S runtimes \
- -DLLVM_ENABLE_RUNTIMES="libunwind" \
- -DLIBUNWIND_ENABLE_SHARED=OFF -DLIBUNWIND_ENABLE_STATIC=ON \
- -DLIBUNWIND_INCLUDE_DOCS=OFF -DLIBUNWIND_INSTALL_HEADERS=ON \
- -DCMAKE_INSTALL_PREFIX="$(pwd)/../install/usr" \
- -G "MinGW Makefiles" -B build
-
- cd build
- mingw32-make VERBOSE=1 clean
- mingw32-make VERBOSE=1 unwind_static
- mingw32-make VERBOSE=1 install-unwind
-
- popd
- mkdir -p install/usr/lib
- cp -r vendor/libunwind/build/lib/libunwind.a install/usr/lib/
-
- name: Building miniupnpc
run: |
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
@@ -105,9 +83,6 @@ jobs:
- name: Building wakunode2
run: |
- cd vendor/nim-libbacktrace
- cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib
- cd ../..
make wakunode2 LOG_LEVEL=DEBUG V=3 -j8
- name: Check Executable
diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim
index 127a761c0..9c0f47dcd 100644
--- a/apps/chat2/chat2.nim
+++ b/apps/chat2/chat2.nim
@@ -11,7 +11,6 @@ import
confutils,
chronicles,
chronos,
- stew/shims/net as stewNet,
eth/keys,
bearssl,
stew/[byteutils, results],
@@ -559,7 +558,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
let rlnConf = WakuRlnConfig(
dynamic: conf.rlnRelayDynamic,
credIndex: conf.rlnRelayCredIndex,
- chainId: conf.rlnRelayChainId,
+ chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()),
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
creds: some(
RlnRelayCreds(
diff --git a/apps/networkmonitor/networkmonitor_config.nim b/apps/networkmonitor/networkmonitor_config.nim
index 04245f9dd..8f5298a53 100644
--- a/apps/networkmonitor/networkmonitor_config.nim
+++ b/apps/networkmonitor/networkmonitor_config.nim
@@ -5,7 +5,6 @@ import
chronos,
std/strutils,
results,
- stew/shims/net,
regex
type EthRpcUrl* = distinct string
diff --git a/apps/networkmonitor/networkmonitor_utils.nim b/apps/networkmonitor/networkmonitor_utils.nim
index f12b16014..25b79da65 100644
--- a/apps/networkmonitor/networkmonitor_utils.nim
+++ b/apps/networkmonitor/networkmonitor_utils.nim
@@ -3,7 +3,6 @@
import
std/json,
results,
- stew/shims/net,
chronicles,
chronicles/topics_registry,
chronos,
diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim
index 3565c619f..84ac6350c 100644
--- a/apps/wakucanary/wakucanary.nim
+++ b/apps/wakucanary/wakucanary.nim
@@ -2,7 +2,6 @@ import
std/[strutils, sequtils, tables, strformat],
confutils,
chronos,
- stew/shims/net,
chronicles/topics_registry,
os
import
diff --git a/examples/filter_subscriber.nim b/examples/filter_subscriber.nim
index 2216e4a41..5554966d4 100644
--- a/examples/filter_subscriber.nim
+++ b/examples/filter_subscriber.nim
@@ -1,7 +1,6 @@
import
std/[tables, sequtils],
stew/byteutils,
- stew/shims/net,
chronicles,
chronos,
confutils,
diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim
index b0f919a89..9c7499695 100644
--- a/examples/lightpush_publisher.nim
+++ b/examples/lightpush_publisher.nim
@@ -1,7 +1,6 @@
import
std/[tables, times, sequtils],
stew/byteutils,
- stew/shims/net,
chronicles,
results,
chronos,
diff --git a/examples/publisher.nim b/examples/publisher.nim
index 907ce2274..8c2d03679 100644
--- a/examples/publisher.nim
+++ b/examples/publisher.nim
@@ -1,7 +1,6 @@
import
std/[tables, times, sequtils],
stew/byteutils,
- stew/shims/net,
chronicles,
chronos,
confutils,
diff --git a/examples/subscriber.nim b/examples/subscriber.nim
index 633bfa4ca..7eb900792 100644
--- a/examples/subscriber.nim
+++ b/examples/subscriber.nim
@@ -1,7 +1,6 @@
import
std/[tables, sequtils],
stew/byteutils,
- stew/shims/net,
chronicles,
chronos,
confutils,
diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim
index b5dafb0be..fdcd36986 100644
--- a/examples/wakustealthcommitments/node_spec.nim
+++ b/examples/wakustealthcommitments/node_spec.nim
@@ -3,7 +3,6 @@
import waku/[common/logging, factory/[waku, networks_config, external_config]]
import
std/[options, strutils, os, sequtils],
- stew/shims/net as stewNet,
chronicles,
chronos,
metrics,
diff --git a/tests/common/test_confutils_envvar.nim b/tests/common/test_confutils_envvar.nim
index 676a35ae1..fca11cca6 100644
--- a/tests/common/test_confutils_envvar.nim
+++ b/tests/common/test_confutils_envvar.nim
@@ -3,7 +3,6 @@
import
std/[os, options],
results,
- stew/shims/net as stewNet,
testutils/unittests,
confutils,
confutils/defs,
diff --git a/tests/common/test_enr_builder.nim b/tests/common/test_enr_builder.nim
index 9fe8f6807..0cf7bcb55 100644
--- a/tests/common/test_enr_builder.nim
+++ b/tests/common/test_enr_builder.nim
@@ -1,6 +1,6 @@
{.used.}
-import std/options, results, stew/shims/net, testutils/unittests
+import std/[options, net], results, testutils/unittests
import waku/common/enr, ../testlib/wakucore
suite "nim-eth ENR - builder and typed record":
diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim
index 5bd4e2c86..927246b0d 100644
--- a/tests/factory/test_external_config.nim
+++ b/tests/factory/test_external_config.nim
@@ -8,7 +8,8 @@ import
libp2p/multiaddress,
nimcrypto/utils,
secp256k1,
- confutils
+ confutils,
+ stint
import
../../waku/factory/external_config,
../../waku/factory/networks_config,
diff --git a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim
index 1edd9243e..c0e25ec6a 100644
--- a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim
+++ b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim
@@ -3,7 +3,6 @@ import
testutils/unittests,
libp2p/[multiaddress, peerid],
libp2p/crypto/crypto,
- stew/shims/net,
eth/keys,
eth/p2p/discoveryv5/enr,
nimcrypto/utils
diff --git a/tests/node/peer_manager/test_peer_manager.nim b/tests/node/peer_manager/test_peer_manager.nim
index 6eddda0d6..ad1f1bf0e 100644
--- a/tests/node/peer_manager/test_peer_manager.nim
+++ b/tests/node/peer_manager/test_peer_manager.nim
@@ -1,9 +1,4 @@
-import
- chronicles,
- std/[options, tables, strutils],
- stew/shims/net,
- chronos,
- testutils/unittests
+import chronicles, std/[options, tables, strutils], chronos, testutils/unittests
import
waku/node/waku_node,
@@ -23,7 +18,7 @@ suite "Peer Manager":
asyncSetup:
listenPort = Port(0)
- listenAddress = ValidIpAddress.init("0.0.0.0")
+ listenAddress = parseIpAddress("0.0.0.0")
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
clusterId = 1
diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim
index bf9f2495b..abf555b68 100644
--- a/tests/node/test_wakunode_filter.nim
+++ b/tests/node/test_wakunode_filter.nim
@@ -2,7 +2,6 @@
import
std/[options, tables, sequtils, strutils, sets],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
chronicles,
diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim
index dfc306de8..806bfe032 100644
--- a/tests/node/test_wakunode_legacy_lightpush.nim
+++ b/tests/node/test_wakunode_legacy_lightpush.nim
@@ -1,8 +1,7 @@
{.used.}
import
- std/[options, tempfiles],
- stew/shims/net as stewNet,
+ std/[options, tempfiles, net],
testutils/unittests,
chronos,
std/strformat,
@@ -46,8 +45,8 @@ suite "Waku Legacy Lightpush - End To End":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
await server.start()
@@ -70,7 +69,7 @@ suite "Waku Legacy Lightpush - End To End":
asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
# Given a light lightpush client
let lightpushClient =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLegacyLightpushClient()
# When the client publishes a message
@@ -129,8 +128,8 @@ suite "RLN Proofs as a Lightpush Service":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
@@ -162,7 +161,7 @@ suite "RLN Proofs as a Lightpush Service":
asyncTest "Message is published when RLN enabled":
# Given a light lightpush client
let lightpushClient =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLegacyLightPushClient()
# When the client publishes a message
diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim
index 8ede3f6f2..beed3c1c6 100644
--- a/tests/node/test_wakunode_legacy_store.nim
+++ b/tests/node/test_wakunode_legacy_store.nim
@@ -1,11 +1,6 @@
{.used.}
-import
- std/options,
- stew/shims/net as stewNet,
- testutils/unittests,
- chronos,
- libp2p/crypto/crypto
+import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -66,8 +61,8 @@ suite "Waku Store - End to End - Sorted Archive":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
let mountArchiveResult = server.mountLegacyArchive(archiveDriver)
@@ -440,7 +435,7 @@ suite "Waku Store - End to End - Sorted Archive":
newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
otherServerKey = generateSecp256k1Key()
otherServer =
- newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountOtherArchiveResult =
otherServer.mountLegacyArchive(otherArchiveDriverWithMessages)
assert mountOtherArchiveResult.isOk()
@@ -522,8 +517,8 @@ suite "Waku Store - End to End - Unsorted Archive":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
let
unsortedArchiveDriverWithMessages =
@@ -678,8 +673,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
let archiveDriver = newSqliteArchiveDriver()
.put(pubsubTopic, archiveMessages[0 ..< 6])
@@ -927,7 +922,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
ephemeralServerKey = generateSecp256k1Key()
ephemeralServer =
- newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountEphemeralArchiveResult =
ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver)
assert mountEphemeralArchiveResult.isOk()
@@ -970,7 +965,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
mixedServerKey = generateSecp256k1Key()
mixedServer =
- newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver)
assert mountMixedArchiveResult.isOk()
@@ -997,7 +992,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
emptyServerKey = generateSecp256k1Key()
emptyServer =
- newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver)
assert mountEmptyArchiveResult.isOk()
@@ -1028,7 +1023,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
voluminousServerKey = generateSecp256k1Key()
voluminousServer =
- newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountVoluminousArchiveResult =
voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages)
assert mountVoluminousArchiveResult.isOk()
diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim
index 8d48c8cb7..dccb899af 100644
--- a/tests/node/test_wakunode_lightpush.nim
+++ b/tests/node/test_wakunode_lightpush.nim
@@ -2,7 +2,6 @@
import
std/[options, tempfiles],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
std/strformat,
@@ -40,8 +39,8 @@ suite "Waku Lightpush - End To End":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
await server.start()
@@ -63,7 +62,7 @@ suite "Waku Lightpush - End To End":
asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
# Given a light lightpush client
let lightpushClient =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLightpushClient()
# When the client publishes a message
@@ -123,8 +122,8 @@ suite "RLN Proofs as a Lightpush Service":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
@@ -156,7 +155,7 @@ suite "RLN Proofs as a Lightpush Service":
asyncTest "Message is published when RLN enabled":
# Given a light lightpush client
let lightpushClient =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLightPushClient()
# When the client publishes a message
diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim
index 26837869d..3996be0dc 100644
--- a/tests/node/test_wakunode_peer_exchange.nim
+++ b/tests/node/test_wakunode_peer_exchange.nim
@@ -5,7 +5,6 @@ import
testutils/unittests,
chronos,
chronicles,
- stew/shims/net,
libp2p/switch,
libp2p/peerId,
libp2p/crypto/crypto,
diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim
index 88fcc827f..0ef2b1a13 100644
--- a/tests/node/test_wakunode_peer_manager.nim
+++ b/tests/node/test_wakunode_peer_manager.nim
@@ -3,7 +3,6 @@
import
os,
std/[options, tables],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
# chronos/timer,
@@ -32,7 +31,7 @@ const DEFAULT_PROTOCOLS: seq[string] =
@["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"]
let
- listenIp = ValidIpAddress.init("0.0.0.0")
+ listenIp = parseIpAddress("0.0.0.0")
listenPort = Port(0)
suite "Peer Manager":
diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim
index afc282d50..66866c4da 100644
--- a/tests/node/test_wakunode_relay_rln.nim
+++ b/tests/node/test_wakunode_relay_rln.nim
@@ -2,7 +2,6 @@
import
std/[tempfiles, strutils, options],
- stew/shims/net as stewNet,
stew/results,
testutils/unittests,
chronos,
@@ -121,8 +120,8 @@ suite "Waku RlnRelay - End to End - Static":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
@@ -410,8 +409,8 @@ suite "Waku RlnRelay - End to End - OnChain":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
diff --git a/tests/node/test_wakunode_sharding.nim b/tests/node/test_wakunode_sharding.nim
index bdd6859b9..5b99689be 100644
--- a/tests/node/test_wakunode_sharding.nim
+++ b/tests/node/test_wakunode_sharding.nim
@@ -1,16 +1,10 @@
{.used.}
-import
- std/[options, sequtils, tempfiles],
- testutils/unittests,
- chronos,
- chronicles,
- stew/shims/net as stewNet
+import std/[options, sequtils, tempfiles], testutils/unittests, chronos, chronicles
import
std/[sequtils, tempfiles],
stew/byteutils,
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/switch,
@@ -35,7 +29,7 @@ import
import waku_relay/protocol
const
- listenIp = ValidIpAddress.init("0.0.0.0")
+ listenIp = parseIpAddress("0.0.0.0")
listenPort = Port(0)
suite "Sharding":
diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim
index 622322d92..00dbfb7ee 100644
--- a/tests/node/test_wakunode_store.nim
+++ b/tests/node/test_wakunode_store.nim
@@ -1,11 +1,6 @@
{.used.}
-import
- std/[options, sequtils, sets],
- stew/shims/net as stewNet,
- testutils/unittests,
- chronos,
- libp2p/crypto/crypto
+import std/[options, sequtils, sets], testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
@@ -75,8 +70,8 @@ suite "Waku Store - End to End - Sorted Archive":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, messages)
let mountArchiveResult = server.mountArchive(archiveDriver)
@@ -480,7 +475,7 @@ suite "Waku Store - End to End - Sorted Archive":
)
otherServerKey = generateSecp256k1Key()
otherServer =
- newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountOtherArchiveResult =
otherServer.mountArchive(otherArchiveDriverWithMessages)
assert mountOtherArchiveResult.isOk()
@@ -571,8 +566,8 @@ suite "Waku Store - End to End - Unsorted Archive":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
let
unsortedArchiveDriverWithMessages =
@@ -788,8 +783,8 @@ suite "Waku Store - End to End - Unsorted Archive without provided Timestamp":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
let
unsortedArchiveDriverWithMessages =
@@ -938,8 +933,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()
- server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
- client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
+ client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put(
pubsubTopicB, messages[6 ..< 10]
@@ -1189,7 +1184,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
ephemeralServerKey = generateSecp256k1Key()
ephemeralServer =
- newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountEphemeralArchiveResult =
ephemeralServer.mountArchive(ephemeralArchiveDriver)
assert mountEphemeralArchiveResult.isOk()
@@ -1231,7 +1226,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
mixedServerKey = generateSecp256k1Key()
mixedServer =
- newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver)
assert mountMixedArchiveResult.isOk()
@@ -1258,7 +1253,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
emptyServerKey = generateSecp256k1Key()
emptyServer =
- newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver)
assert mountEmptyArchiveResult.isOk()
@@ -1298,7 +1293,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
let
voluminousServerKey = generateSecp256k1Key()
voluminousServer =
- newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
mountVoluminousArchiveResult =
voluminousServer.mountArchive(voluminousArchiveDriverWithMessages)
assert mountVoluminousArchiveResult.isOk()
diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim
index 1c2805710..9ef5ddd90 100644
--- a/tests/test_peer_manager.nim
+++ b/tests/test_peer_manager.nim
@@ -2,7 +2,6 @@
import
std/[sequtils, times, sugar, net],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
json_rpc/rpcserver,
@@ -39,7 +38,7 @@ procSuite "Peer Manager":
asyncTest "connectPeer() works":
# Create 2 nodes
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
@@ -58,7 +57,7 @@ procSuite "Peer Manager":
asyncTest "dialPeer() works":
# Create 2 nodes
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
@@ -93,7 +92,7 @@ procSuite "Peer Manager":
asyncTest "dialPeer() fails gracefully":
# Create 2 nodes and start them
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))
@@ -121,8 +120,7 @@ procSuite "Peer Manager":
asyncTest "Adding, selecting and filtering peers work":
let
- node =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Create filter peer
filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
@@ -155,7 +153,7 @@ procSuite "Peer Manager":
asyncTest "Peer manager keeps track of connections":
# Create 2 nodes
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
@@ -208,7 +206,7 @@ procSuite "Peer Manager":
asyncTest "Peer manager updates failed peers correctly":
# Create 2 nodes
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
await allFutures(nodes.mapIt(it.start()))
@@ -310,7 +308,7 @@ procSuite "Peer Manager":
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("127.0.0.1"),
+ parseIpAddress("127.0.0.1"),
Port(56037),
peerStorage = storage,
)
@@ -383,7 +381,7 @@ procSuite "Peer Manager":
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("127.0.0.1"),
+ parseIpAddress("127.0.0.1"),
Port(56037),
peerStorage = storage,
)
@@ -419,7 +417,7 @@ procSuite "Peer Manager":
# different network
node1 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("0.0.0.0"),
+ parseIpAddress("0.0.0.0"),
port,
clusterId = 3,
shards = @[uint16(0)],
@@ -428,14 +426,14 @@ procSuite "Peer Manager":
# same network
node2 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("0.0.0.0"),
+ parseIpAddress("0.0.0.0"),
port,
clusterId = 4,
shards = @[uint16(0)],
)
node3 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("0.0.0.0"),
+ parseIpAddress("0.0.0.0"),
port,
clusterId = 4,
shards = @[uint16(0)],
@@ -475,12 +473,12 @@ procSuite "Peer Manager":
storage = WakuPeerStorage.new(database)[]
node1 = newTestWakuNode(
generateSecp256k1Key(),
- ValidIpAddress.init("0.0.0.0"),
+ parseIpAddress("0.0.0.0"),
Port(0),
peerStorage = storage,
)
node2 =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
peerInfo2 = node2.switch.peerInfo
betaCodec = "/vac/waku/relay/2.0.0-beta2"
stableCodec = "/vac/waku/relay/2.0.0"
@@ -508,10 +506,7 @@ procSuite "Peer Manager":
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
- generateSecp256k1Key(),
- ValidIpAddress.init("0.0.0.0"),
- Port(0),
- peerStorage = storage,
+ generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage
)
(await node3.mountRelay()).isOkOr:
@@ -546,7 +541,7 @@ procSuite "Peer Manager":
let nodes = toSeq(0 ..< 4).mapIt(
newTestWakuNode(
nodeKey = generateSecp256k1Key(),
- bindIp = ValidIpAddress.init("0.0.0.0"),
+ bindIp = parseIpAddress("0.0.0.0"),
bindPort = Port(0),
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
)
@@ -616,7 +611,7 @@ procSuite "Peer Manager":
let nodes = toSeq(0 ..< 4).mapIt(
newTestWakuNode(
nodeKey = generateSecp256k1Key(),
- bindIp = ValidIpAddress.init("0.0.0.0"),
+ bindIp = parseIpAddress("0.0.0.0"),
bindPort = Port(0),
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
)
@@ -684,7 +679,7 @@ procSuite "Peer Manager":
asyncTest "Peer store keeps track of incoming connections":
# Create 4 nodes
let nodes = toSeq(0 ..< 4).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
# Start them
@@ -778,8 +773,7 @@ procSuite "Peer Manager":
let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"
let
- node =
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
peers = toSeq(1 .. 4)
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
.filterIt(it.isOk())
@@ -818,7 +812,7 @@ procSuite "Peer Manager":
asyncTest "connectedPeers() returns expected number of connections per protocol":
# Create 4 nodes
let nodes = toSeq(0 ..< 4).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
# Start them with relay + filter
@@ -873,7 +867,7 @@ procSuite "Peer Manager":
asyncTest "getNumStreams() returns expected number of connections per protocol":
# Create 2 nodes
let nodes = toSeq(0 ..< 2).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
# Start them with relay + filter
@@ -1140,7 +1134,7 @@ procSuite "Peer Manager":
asyncTest "colocationLimit is enforced by pruneConnsByIp()":
# Create 5 nodes
let nodes = toSeq(0 ..< 5).mapIt(
- newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
+ newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
)
# Start them with relay + filter
diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim
index a729ff1a7..a5e3b63ee 100644
--- a/tests/test_relay_peer_exchange.nim
+++ b/tests/test_relay_peer_exchange.nim
@@ -2,7 +2,6 @@
import
std/[sequtils, options],
- stew/shims/net,
testutils/unittests,
chronos,
libp2p/peerid,
diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim
index 7028b20eb..758bdb3ca 100644
--- a/tests/test_waku_dnsdisc.nim
+++ b/tests/test_waku_dnsdisc.nim
@@ -2,7 +2,6 @@
import
std/[sequtils, tables],
- stew/shims/net,
results,
stew/base32,
testutils/unittests,
diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim
index d4d05ad97..3fcf01b8e 100644
--- a/tests/test_waku_keepalive.nim
+++ b/tests/test_waku_keepalive.nim
@@ -1,7 +1,6 @@
{.used.}
import
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
chronicles,
diff --git a/tests/test_waku_metadata.nim b/tests/test_waku_metadata.nim
index aa22a43f4..b30fd1712 100644
--- a/tests/test_waku_metadata.nim
+++ b/tests/test_waku_metadata.nim
@@ -5,7 +5,6 @@ import
testutils/unittests,
chronos,
chronicles,
- stew/shims/net,
libp2p/switch,
libp2p/peerId,
libp2p/crypto/crypto,
diff --git a/tests/test_waku_switch.nim b/tests/test_waku_switch.nim
index e58bff12e..3e6fd08eb 100644
--- a/tests/test_waku_switch.nim
+++ b/tests/test_waku_switch.nim
@@ -75,8 +75,15 @@ suite "Waku Switch":
completionFut = newFuture[bool]()
proto = new LPProtocol
proto.codec = customProtoCodec
- proto.handler = proc(conn: Connection, proto: string) {.async.} =
- assert (await conn.readLp(1024)) == msg.toBytes()
+ proto.handler = proc(
+ conn: Connection, proto: string
+ ) {.async: (raises: [CancelledError]).} =
+ try:
+ assert (await conn.readLp(1024)) == msg.toBytes()
+ except LPStreamError:
+ error "Connection read error", error = getCurrentExceptionMsg()
+ assert false, getCurrentExceptionMsg()
+
completionFut.complete(true)
await proto.start()
diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim
index 51dd999b0..e50f3fe98 100644
--- a/tests/test_wakunode.nim
+++ b/tests/test_wakunode.nim
@@ -3,7 +3,6 @@
import
std/[sequtils, strutils, net],
stew/byteutils,
- stew/shims/net as stewNet,
testutils/unittests,
chronicles,
chronos,
@@ -15,7 +14,8 @@ import
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/gossipsub,
libp2p/nameresolving/mockresolver,
- eth/p2p/discoveryv5/enr
+ eth/p2p/discoveryv5/enr,
+ eth/net/utils
import
waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode
diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim
index 87fdbcf5f..54719aac1 100644
--- a/tests/testlib/wakunode.nim
+++ b/tests/testlib/wakunode.nim
@@ -1,7 +1,6 @@
import
std/options,
results,
- stew/shims/net,
chronos,
libp2p/switch,
libp2p/builders,
diff --git a/tests/waku_core/test_published_address.nim b/tests/waku_core/test_published_address.nim
index 37f263ea0..9d6201a77 100644
--- a/tests/waku_core/test_published_address.nim
+++ b/tests/waku_core/test_published_address.nim
@@ -1,6 +1,6 @@
{.used.}
-import stew/shims/net as stewNet, std/strutils, testutils/unittests
+import std/[strutils, net], testutils/unittests
import ../testlib/wakucore, ../testlib/wakunode
suite "Waku Core - Published Address":
diff --git a/tests/waku_discv5/utils.nim b/tests/waku_discv5/utils.nim
index 422e13fd9..5a69108c5 100644
--- a/tests/waku_discv5/utils.nim
+++ b/tests/waku_discv5/utils.nim
@@ -1,9 +1,4 @@
-import
- std/options,
- stew/shims/net,
- chronos,
- libp2p/crypto/crypto as libp2p_keys,
- eth/keys as eth_keys
+import std/options, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys
import
waku/
diff --git a/tests/waku_enr/test_sharding.nim b/tests/waku_enr/test_sharding.nim
index 7c65d83fb..0984b7d8d 100644
--- a/tests/waku_enr/test_sharding.nim
+++ b/tests/waku_enr/test_sharding.nim
@@ -2,7 +2,6 @@
import
stew/results,
- stew/shims/net,
chronos,
testutils/unittests,
libp2p/crypto/crypto as libp2p_keys,
diff --git a/tests/waku_enr/utils.nim b/tests/waku_enr/utils.nim
index 8f79b1d8f..7302c2112 100644
--- a/tests/waku_enr/utils.nim
+++ b/tests/waku_enr/utils.nim
@@ -2,7 +2,6 @@ import
std/options,
sequtils,
results,
- stew/shims/net,
chronos,
libp2p/crypto/crypto as libp2p_keys,
eth/keys as eth_keys
diff --git a/tests/waku_relay/test_message_id.nim b/tests/waku_relay/test_message_id.nim
index 633303120..6dcd72ab7 100644
--- a/tests/waku_relay/test_message_id.nim
+++ b/tests/waku_relay/test_message_id.nim
@@ -1,7 +1,7 @@
import
unittest,
results,
- stew/[shims/net, byteutils],
+ stew/byteutils,
nimcrypto/sha2,
libp2p/protocols/pubsub/rpc/messages
diff --git a/tests/waku_relay/test_protocol.nim b/tests/waku_relay/test_protocol.nim
index d0e8a7ed6..bc2097caa 100644
--- a/tests/waku_relay/test_protocol.nim
+++ b/tests/waku_relay/test_protocol.nim
@@ -2,7 +2,6 @@
import
std/[options, strformat],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/protocols/pubsub/[pubsub, gossipsub],
diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim
index 5d5ce8458..c9c8d82ef 100644
--- a/tests/waku_relay/test_wakunode_relay.nim
+++ b/tests/waku_relay/test_wakunode_relay.nim
@@ -3,7 +3,6 @@
import
std/[os, sequtils, sysrand, math],
stew/byteutils,
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/switch,
diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim
index 6de28583e..309f800dc 100644
--- a/tests/waku_relay/utils.nim
+++ b/tests/waku_relay/utils.nim
@@ -3,7 +3,6 @@
import
std/[strutils, sequtils, tempfiles],
stew/byteutils,
- stew/shims/net as stewNet,
chronos,
chronicles,
libp2p/switch,
diff --git a/tests/waku_rln_relay/rln/test_wrappers.nim b/tests/waku_rln_relay/rln/test_wrappers.nim
index 26e18f9da..f19599e4f 100644
--- a/tests/waku_rln_relay/rln/test_wrappers.nim
+++ b/tests/waku_rln_relay/rln/test_wrappers.nim
@@ -3,7 +3,6 @@ import
testutils/unittests,
chronicles,
chronos,
- stew/shims/net as stewNet,
eth/keys,
bearssl,
stew/[results],
diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
index 7ba64e39b..b19d15030 100644
--- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
+++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
@@ -57,19 +57,21 @@ suite "Onchain group manager":
raiseAssert "Expected error when chainId does not match"
asyncTest "should initialize when chainId is set to 0":
- manager.chainId = 0
+ manager.chainId = 0x0'u256
(await manager.init()).isOkOr:
raiseAssert $error
asyncTest "should error on initialization when loaded metadata does not match":
(await manager.init()).isOkOr:
- raiseAssert $error
+ assert false, $error
let metadataSetRes = manager.setMetadata()
assert metadataSetRes.isOk(), metadataSetRes.error
let metadataOpt = manager.rlnInstance.getMetadata().valueOr:
- raiseAssert $error
+ assert false, $error
+ return
+
assert metadataOpt.isSome(), "metadata is not set"
let metadata = metadataOpt.get()
@@ -84,17 +86,12 @@ suite "Onchain group manager":
ethContractAddress: $differentContractAddress,
rlnInstance: manager.rlnInstance,
onFatalErrorAction: proc(errStr: string) =
- raiseAssert errStr
+ assert false, errStr
,
)
let e = await manager2.init()
(e).isErrOr:
- raiseAssert "Expected error when contract address doesn't match"
-
- echo "---"
- discard "persisted data: contract address mismatch"
- echo e.error
- echo "---"
+ assert false, "Expected error when contract address doesn't match"
asyncTest "should error if contract does not exist":
manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
diff --git a/tests/waku_rln_relay/test_rln_group_manager_static.nim b/tests/waku_rln_relay/test_rln_group_manager_static.nim
index 5d1916f63..73dff8a8b 100644
--- a/tests/waku_rln_relay/test_rln_group_manager_static.nim
+++ b/tests/waku_rln_relay/test_rln_group_manager_static.nim
@@ -13,7 +13,7 @@ import
waku_rln_relay/group_manager/static/group_manager,
]
-import stew/shims/net, chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder
+import chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder
import std/tempfiles
diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim
index e89d8e2e6..d09764ca2 100644
--- a/tests/waku_rln_relay/test_waku_rln_relay.nim
+++ b/tests/waku_rln_relay/test_waku_rln_relay.nim
@@ -3,7 +3,6 @@
import
std/[options, os, sequtils, tempfiles],
stew/byteutils,
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
chronicles,
@@ -247,7 +246,7 @@ suite "Waku rln relay":
.setMetadata(
RlnMetadata(
lastProcessedBlock: 128,
- chainId: 1155511,
+ chainId: 1155511'u256,
contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155",
)
)
@@ -265,7 +264,7 @@ suite "Waku rln relay":
.setMetadata(
RlnMetadata(
lastProcessedBlock: 128,
- chainId: 1155511,
+ chainId: 1155511'u256,
contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155",
)
)
@@ -278,7 +277,7 @@ suite "Waku rln relay":
let metadata = metadataOpt.get()
check:
metadata.lastProcessedBlock == 128
- metadata.chainId == 1155511
+ metadata.chainId == 1155511'u256
metadata.contractAddress == "0x9c09146844c1326c2dbc41c451766c7138f88155"
test "getMetadata: empty rln metadata":
diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
index 7620dfc14..312fe5cfc 100644
--- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim
+++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
@@ -3,7 +3,6 @@
import
std/[sequtils, tempfiles],
stew/byteutils,
- stew/shims/net as stewNet,
testutils/unittests,
chronicles,
chronos,
diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim
index 433f865c4..0c7fcce26 100644
--- a/tests/waku_rln_relay/utils_onchain.nim
+++ b/tests/waku_rln_relay/utils_onchain.nim
@@ -13,6 +13,7 @@ import
web3,
web3/conversions,
web3/eth_api_types,
+ json_rpc/rpcclient,
json,
libp2p/crypto/crypto,
eth/keys,
@@ -29,7 +30,7 @@ import
../testlib/common,
./utils
-const CHAIN_ID* = 1337
+const CHAIN_ID* = 1337'u256
template skip0xPrefix(hexStr: string): int =
## Returns the index of the first meaningful char in `hexStr` by skipping
@@ -74,7 +75,8 @@ proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
let add = web3.defaultAccount
debug "contract deployer account address ", add
- let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
+ let balance =
+ await web3.provider.eth_getBalance(web3.defaultAccount, blockId("latest"))
debug "Initial account balance: ", balance
# deploy poseidon hasher bytecode
diff --git a/tests/waku_rln_relay/utils_static.nim b/tests/waku_rln_relay/utils_static.nim
index 8f564beb1..cbf3e9253 100644
--- a/tests/waku_rln_relay/utils_static.nim
+++ b/tests/waku_rln_relay/utils_static.nim
@@ -3,7 +3,6 @@
import
std/[sequtils, tempfiles],
stew/byteutils,
- stew/shims/net as stewNet,
chronos,
chronicles,
libp2p/switch,
diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim
index 1d5e4dcfd..b20309079 100644
--- a/tests/waku_store/test_wakunode_store.nim
+++ b/tests/waku_store/test_wakunode_store.nim
@@ -2,7 +2,6 @@
import
std/sequtils,
- stew/shims/net as stewNet,
testutils/unittests,
chronicles,
chronos,
diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim
index 2ee933e3f..4f52732da 100644
--- a/tests/wakunode2/test_app.nim
+++ b/tests/wakunode2/test_app.nim
@@ -1,7 +1,6 @@
{.used.}
import
- stew/shims/net,
testutils/unittests,
chronicles,
chronos,
diff --git a/tests/wakunode2/test_validators.nim b/tests/wakunode2/test_validators.nim
index 23a3e5d6f..44b6ae118 100644
--- a/tests/wakunode2/test_validators.nim
+++ b/tests/wakunode2/test_validators.nim
@@ -2,7 +2,6 @@
import
std/[sequtils, sysrand, math],
- stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/crypto/crypto,
diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim
index 7d29711b1..58e70aa25 100644
--- a/tests/wakunode_rest/test_rest_cors.nim
+++ b/tests/wakunode_rest/test_rest_cors.nim
@@ -1,7 +1,6 @@
{.used.}
import
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_debug.nim b/tests/wakunode_rest/test_rest_debug.nim
index 3129b3544..9add57cbe 100644
--- a/tests/wakunode_rest/test_rest_debug.nim
+++ b/tests/wakunode_rest/test_rest_debug.nim
@@ -1,7 +1,6 @@
{.used.}
import
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim
index 556b6b52e..dcd430a0e 100644
--- a/tests/wakunode_rest/test_rest_filter.nim
+++ b/tests/wakunode_rest/test_rest_filter.nim
@@ -3,7 +3,6 @@
import
chronos/timer,
stew/byteutils,
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim
index 3c7c94e87..1f6c6314f 100644
--- a/tests/wakunode_rest/test_rest_health.nim
+++ b/tests/wakunode_rest/test_rest_health.nim
@@ -2,7 +2,6 @@
import
std/tempfiles,
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim
index 72e309a13..035b2a884 100644
--- a/tests/wakunode_rest/test_rest_lightpush.nim
+++ b/tests/wakunode_rest/test_rest_lightpush.nim
@@ -3,7 +3,6 @@
import
std/sequtils,
stew/byteutils,
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
index e1d6dca30..f50703bae 100644
--- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim
+++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
@@ -3,7 +3,6 @@
import
std/sequtils,
stew/byteutils,
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim
index 8bca121e3..208b86190 100644
--- a/tests/wakunode_rest/test_rest_relay.nim
+++ b/tests/wakunode_rest/test_rest_relay.nim
@@ -3,7 +3,6 @@
import
std/[sequtils, strformat, tempfiles],
stew/byteutils,
- stew/shims/net,
testutils/unittests,
presto,
presto/client as presto_client,
@@ -320,7 +319,7 @@ suite "Waku v2 Rest API - Relay":
check:
# Node should be subscribed to all shards
node.wakuRelay.subscribedTopics ==
- @["/waku/2/rs/1/7", "/waku/2/rs/1/2", "/waku/2/rs/1/5"]
+ @["/waku/2/rs/1/5", "/waku/2/rs/1/7", "/waku/2/rs/1/2"]
await restServer.stop()
await restServer.closeWait()
diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim
index d0631bfbf..f08ed0a17 100644
--- a/tests/wakunode_rest/test_rest_store.nim
+++ b/tests/wakunode_rest/test_rest_store.nim
@@ -2,7 +2,6 @@
import
std/[options, sugar],
- stew/shims/net as stewNet,
chronicles,
chronos/timer,
testutils/unittests,
diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim
index cd501e52d..ee5911abf 100644
--- a/tools/rln_keystore_generator/rln_keystore_generator.nim
+++ b/tools/rln_keystore_generator/rln_keystore_generator.nim
@@ -20,7 +20,7 @@ type RlnKeystoreGeneratorConf* = object
execute*: bool
ethContractAddress*: string
ethClientUrls*: seq[string]
- chainId*: uint
+ chainId*: UInt256
credPath*: string
credPassword*: string
userMessageLimit*: uint64
diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles
index 81a4a7a36..a8fb38a10 160000
--- a/vendor/nim-chronicles
+++ b/vendor/nim-chronicles
@@ -1 +1 @@
-Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304
+Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12
diff --git a/vendor/nim-chronos b/vendor/nim-chronos
index c04576d82..0646c444f 160000
--- a/vendor/nim-chronos
+++ b/vendor/nim-chronos
@@ -1 +1 @@
-Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0
+Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655
diff --git a/vendor/nim-eth b/vendor/nim-eth
index c6c9dc7ae..a1f7d63ab 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit c6c9dc7ae01656eba8126b913e84bdfb95c8c323
+Subproject commit a1f7d63ababa6ce90798e16a110fc4e43ac93f03
diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams
index 2b08c774a..c51315d0a 160000
--- a/vendor/nim-faststreams
+++ b/vendor/nim-faststreams
@@ -1 +1 @@
-Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c
+Subproject commit c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc
index c0ac84873..cbe8edf69 160000
--- a/vendor/nim-json-rpc
+++ b/vendor/nim-json-rpc
@@ -1 +1 @@
-Subproject commit c0ac848733e42e672081f429fb146451894f7711
+Subproject commit cbe8edf69d743a787b76b1cd25bfc4eae89927f7
diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace
index dbade9ba2..822849874 160000
--- a/vendor/nim-libbacktrace
+++ b/vendor/nim-libbacktrace
@@ -1 +1 @@
-Subproject commit dbade9ba250da7db519c5cdfb225d03ca1255efc
+Subproject commit 822849874926ba3849a86cb3eafdf017bd11bd2d
diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p
index 78a434405..cd60b254a 160000
--- a/vendor/nim-libp2p
+++ b/vendor/nim-libp2p
@@ -1 +1 @@
-Subproject commit 78a434405435b69a24e8b263d48d622d57c4db5b
+Subproject commit cd60b254a0700b0daac7a6cb2c0c48860b57c539
diff --git a/vendor/nim-metrics b/vendor/nim-metrics
index 25ffd054f..11d0cddfb 160000
--- a/vendor/nim-metrics
+++ b/vendor/nim-metrics
@@ -1 +1 @@
-Subproject commit 25ffd054fd774f8cf7935e75d6cad542306d7802
+Subproject commit 11d0cddfb0e711aa2a8c75d1892ae24a64c299fc
diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal
index 213ac13df..dfbf8c9ad 160000
--- a/vendor/nim-nat-traversal
+++ b/vendor/nim-nat-traversal
@@ -1 +1 @@
-Subproject commit 213ac13dfe5c4830474912c48181b86b73f1ec1f
+Subproject commit dfbf8c9ad3655f238b350f690bbfce5ec34d25fb
diff --git a/vendor/nim-regex b/vendor/nim-regex
index 0673df07c..4593305ed 160000
--- a/vendor/nim-regex
+++ b/vendor/nim-regex
@@ -1 +1 @@
-Subproject commit 0673df07cb266e15942c3b5f5b8a4732f049cd73
+Subproject commit 4593305ed1e49731fc75af1dc572dd2559aad19c
diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1
index 62e16b4df..f808ed5e7 160000
--- a/vendor/nim-secp256k1
+++ b/vendor/nim-secp256k1
@@ -1 +1 @@
-Subproject commit 62e16b4dff513f1eea7148a8cbba8a8c547b9546
+Subproject commit f808ed5e7a7bfc42204ec7830f14b7a42b63c284
diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi
index cc4fefd53..d08e96487 160000
--- a/vendor/nim-sqlite3-abi
+++ b/vendor/nim-sqlite3-abi
@@ -1 +1 @@
-Subproject commit cc4fefd538aa43814c5864c540fb75b567c2dcc3
+Subproject commit d08e964872271e83fb1b6de67ad57c2d0fcdfe63
diff --git a/vendor/nim-stew b/vendor/nim-stew
index 687d1b4ab..58abb4891 160000
--- a/vendor/nim-stew
+++ b/vendor/nim-stew
@@ -1 +1 @@
-Subproject commit 687d1b4ab1a91e6cc9c92e4fd4d98bec7874c259
+Subproject commit 58abb4891f97c6cdc07335e868414e0c7b736c68
diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools
index 7b74a716a..9e8ccc754 160000
--- a/vendor/nim-taskpools
+++ b/vendor/nim-taskpools
@@ -1 +1 @@
-Subproject commit 7b74a716a40249720fd7da428113147942b9642d
+Subproject commit 9e8ccc754631ac55ac2fd495e167e74e86293edb
diff --git a/vendor/nim-testutils b/vendor/nim-testutils
index 14a56ae5a..94d68e796 160000
--- a/vendor/nim-testutils
+++ b/vendor/nim-testutils
@@ -1 +1 @@
-Subproject commit 14a56ae5aada81bed43e29d2368fc8ab8a449bf5
+Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857
diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2
index 88a613ffa..8b51e99b4 160000
--- a/vendor/nim-unittest2
+++ b/vendor/nim-unittest2
@@ -1 +1 @@
-Subproject commit 88a613ffa4dbe452971beb937ea2db736dc9a9f4
+Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024
diff --git a/vendor/nim-web3 b/vendor/nim-web3
index 94aac8a77..3ef986c9d 160000
--- a/vendor/nim-web3
+++ b/vendor/nim-web3
@@ -1 +1 @@
-Subproject commit 94aac8a77cd265fe779ce8ed25a028340b925fd1
+Subproject commit 3ef986c9d93604775595f116a35c6ac0bf5257fc
diff --git a/vendor/nim-zlib b/vendor/nim-zlib
index 3f7998095..daa8723fd 160000
--- a/vendor/nim-zlib
+++ b/vendor/nim-zlib
@@ -1 +1 @@
-Subproject commit 3f7998095264d262a8d99e2be89045e6d9301537
+Subproject commit daa8723fd32299d4ca621c837430c29a5a11e19a
diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system
index 8fafcd0ba..0be0663e1 160000
--- a/vendor/nimbus-build-system
+++ b/vendor/nimbus-build-system
@@ -1 +1 @@
-Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e
+Subproject commit 0be0663e1af76e869837226a4ef3e586fcc737d3
diff --git a/vendor/nimcrypto b/vendor/nimcrypto
index dc07e3058..19c41d6be 160000
--- a/vendor/nimcrypto
+++ b/vendor/nimcrypto
@@ -1 +1 @@
-Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc
+Subproject commit 19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
diff --git a/vendor/nph b/vendor/nph
index 0d8000e74..3191cc71f 160000
--- a/vendor/nph
+++ b/vendor/nph
@@ -1 +1 @@
-Subproject commit 0d8000e741fa11ed48fdd116f24b4251b92aa9b5
+Subproject commit 3191cc71f4d49473de6cf73a2680009a92419407
diff --git a/waku.nimble b/waku.nimble
index 9cf73295f..6ec05caaf 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -8,7 +8,7 @@ license = "MIT or Apache License 2.0"
#bin = @["build/waku"]
### Dependencies
-requires "nim >= 2.0.8",
+requires "nim >= 2.2.4",
"chronicles",
"confutils",
"chronos",
diff --git a/waku/discovery/autonat_service.nim b/waku/discovery/autonat_service.nim
index c4e2dd8ed..efc3de561 100644
--- a/waku/discovery/autonat_service.nim
+++ b/waku/discovery/autonat_service.nim
@@ -26,7 +26,7 @@ proc getAutonatService*(rng: ref HmacDrbgContext): AutonatService =
proc statusAndConfidenceHandler(
networkReachability: NetworkReachability, confidence: Opt[float]
- ): Future[void] {.async.} =
+ ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
if confidence.isSome():
info "Peer reachability status",
networkReachability = networkReachability, confidence = confidence.get()
diff --git a/waku/factory/conf_builder/rln_relay_conf_builder.nim b/waku/factory/conf_builder/rln_relay_conf_builder.nim
index ea87eb278..455f0a57f 100644
--- a/waku/factory/conf_builder/rln_relay_conf_builder.nim
+++ b/waku/factory/conf_builder/rln_relay_conf_builder.nim
@@ -1,4 +1,4 @@
-import chronicles, std/options, results
+import chronicles, std/options, results, stint, stew/endians2
import ../waku_conf
logScope:
@@ -9,7 +9,7 @@ logScope:
##############################
type RlnRelayConfBuilder* = object
enabled*: Option[bool]
- chainId*: Option[uint]
+ chainId*: Option[UInt256]
ethClientUrls*: Option[seq[string]]
ethContractAddress*: Option[string]
credIndex*: Option[uint]
@@ -26,8 +26,11 @@ proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
b.enabled = some(enabled)
-proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) =
- b.chainId = some(chainId)
+proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint | UInt256) =
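+  # accept both the legacy uint and UInt256: a plain uint chain id is widened to UInt256 via its big-endian bytes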
+ when chainId is uint:
+ b.chainId = some(UInt256.fromBytesBE(chainId.toBytesBE()))
+ else:
+ b.chainId = some(chainId)
proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
b.credIndex = some(credIndex)
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index 44cb706af..ab53a965f 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -2,6 +2,7 @@ import
libp2p/crypto/crypto,
libp2p/multiaddress,
std/[net, options, sequtils, strutils],
+ stint,
chronicles,
chronos,
results
@@ -292,7 +293,7 @@ proc nodeKey(
proc applyClusterConf(builder: var WakuConfBuilder) =
# Apply cluster conf, overrides most values passed individually
# If you want to tweak values, don't use clusterConf
- if builder.clusterConf.isNone:
+ if builder.clusterConf.isNone():
return
let clusterConf = builder.clusterConf.get()
@@ -417,7 +418,7 @@ proc build*(
warn("Cluster Id was not specified, defaulting to 0")
0.uint16
else:
- builder.clusterId.get()
+ builder.clusterId.get().uint16
let numShardsInNetwork =
if builder.numShardsInNetwork.isSome():
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 800b61e63..2b156fc8e 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -4,6 +4,8 @@ import
chronicles,
chronos,
regex,
+ stew/endians2,
+ stint,
confutils,
confutils/defs,
confutils/std/net,
@@ -867,7 +869,7 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] =
proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
RlnKeystoreGeneratorConf(
execute: n.execute,
- chainId: n.rlnRelayChainId,
+ chainId: UInt256.fromBytesBE(n.rlnRelayChainId.toBytesBE()),
ethClientUrls: n.ethClientUrls.mapIt(string(it)),
ethContractAddress: n.rlnRelayEthContractAddress,
userMessageLimit: n.rlnRelayUserMessageLimit,
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index aceada3fe..8b5f4a628 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -1,5 +1,7 @@
{.push raises: [].}
+import stint
+
# TODO: Rename this type to match file name
type ClusterConf* = object
@@ -7,7 +9,7 @@ type ClusterConf* = object
clusterId*: uint16
rlnRelay*: bool
rlnRelayEthContractAddress*: string
- rlnRelayChainId*: uint
+ rlnRelayChainId*: UInt256
rlnRelayDynamic*: bool
rlnEpochSizeSec*: uint64
rlnRelayUserMessageLimit*: uint64
@@ -20,13 +22,14 @@ type ClusterConf* = object
# Cluster configuration corresponding to The Waku Network. Note that it
# overrides existing cli configuration
proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
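+  # 11155111 is the Ethereum Sepolia chain id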
+ const RelayChainId = 11155111'u256
return ClusterConf(
maxMessageSize: "150KiB",
clusterId: 1,
rlnRelay: true,
rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
rlnRelayDynamic: true,
- rlnRelayChainId: 11155111,
+ rlnRelayChainId: RelayChainId,
rlnEpochSizeSec: 600,
rlnRelayUserMessageLimit: 100,
numShardsInNetwork: 8,
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 75c72449a..40a13b601 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -1103,8 +1103,13 @@ proc new*(
online: true,
)
- proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
- onPeerEvent(pm, peerId, event)
+ proc peerHook(
+ peerId: PeerId, event: PeerEvent
+ ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
+ try:
+ await onPeerEvent(pm, peerId, event)
+ except CatchableError:
+ error "exception in onPeerEvent", error = getCurrentExceptionMsg()
var peerStore = pm.switch.peerStore
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index e38b1e795..152c7125d 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -1475,7 +1475,7 @@ proc start*(node: WakuNode) {.async.} =
## with announced addrs after start
let addressMapper = proc(
listenAddrs: seq[MultiAddress]
- ): Future[seq[MultiAddress]] {.async.} =
+ ): Future[seq[MultiAddress]] {.gcsafe, async: (raises: [CancelledError]).} =
return node.announcedAddresses
node.switch.peerInfo.addressMappers.add(addressMapper)
diff --git a/waku/node/waku_switch.nim b/waku/node/waku_switch.nim
index 48d3612e3..cc99f46ae 100644
--- a/waku/node/waku_switch.nim
+++ b/waku/node/waku_switch.nim
@@ -20,7 +20,7 @@ const MaxConnectionsPerPeer* = 1
proc withWsTransport*(b: SwitchBuilder): SwitchBuilder =
b.withTransport(
- proc(upgr: Upgrade): Transport =
+ proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport =
WsTransport.new(upgr)
)
@@ -48,7 +48,7 @@ proc withWssTransport*(
let key: TLSPrivateKey = getSecureKey(secureKeyPath)
let cert: TLSCertificate = getSecureCert(secureCertPath)
b.withTransport(
- proc(upgr: Upgrade): Transport =
+ proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport =
WsTransport.new(
upgr,
tlsPrivateKey = key,
diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim
index 2007371c7..2ad275a94 100644
--- a/waku/waku_filter_v2/client.nim
+++ b/waku/waku_filter_v2/client.nim
@@ -174,7 +174,7 @@ proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) =
wfc.pushHandlers.add(handler)
proc initProtocolHandler(wfc: WakuFilterClient) =
- proc handler(conn: Connection, proto: string) {.async.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## Notice that the client component is acting as a server of WakuFilterPushCodec messages
while not conn.atEof():
var buf: seq[byte]
diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim
index c3a4683f7..80f60fdd3 100644
--- a/waku/waku_filter_v2/protocol.nim
+++ b/waku/waku_filter_v2/protocol.nim
@@ -287,14 +287,20 @@ proc handleMessage*(
waku_filter_handle_message_duration_seconds.observe(handleMessageDurationSec)
proc initProtocolHandler(wf: WakuFilter) =
- proc handler(conn: Connection, proto: string) {.async.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
debug "filter subscribe request handler triggered",
peerId = shortLog(conn.peerId), conn
var response: FilterSubscribeResponse
wf.peerRequestRateLimiter.checkUsageLimit(WakuFilterSubscribeCodec, conn):
- let buf = await conn.readLp(int(DefaultMaxSubscribeSize))
+ var buf: seq[byte]
+ try:
+ buf = await conn.readLp(int(DefaultMaxSubscribeSize))
+ except LPStreamError:
+ error "failed to read stream in readLp",
+ remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
+ return
waku_service_network_bytes.inc(
amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"]
@@ -302,14 +308,19 @@ proc initProtocolHandler(wf: WakuFilter) =
let decodeRes = FilterSubscribeRequest.decode(buf)
if decodeRes.isErr():
- error "Failed to decode filter subscribe request",
+ error "failed to decode filter subscribe request",
peer_id = conn.peerId, err = decodeRes.error
waku_filter_errors.inc(labelValues = [decodeRpcFailure])
return
let request = decodeRes.value #TODO: toAPI() split here
- response = await wf.handleSubscribeRequest(conn.peerId, request)
+ try:
+ response = await wf.handleSubscribeRequest(conn.peerId, request)
+ except CatchableError:
+ error "handleSubscribeRequest failed",
+ remote_peer_id = conn.peerId, err = getCurrentExceptionMsg()
+ return
debug "sending filter subscribe response",
peer_id = shortLog(conn.peerId), response = response
@@ -322,7 +333,11 @@ proc initProtocolHandler(wf: WakuFilter) =
statusDesc: some("filter request rejected due rate limit exceeded"),
)
- await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here
+ try:
+ await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here
+ except LPStreamError:
+ error "failed to write stream in writeLp",
+ remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
return
wf.handler = handler
@@ -355,8 +370,16 @@ proc new*(
peerRequestRateLimiter: PerPeerRateLimiter(setting: rateLimitSetting),
)
- proc peerEventHandler(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
- wf.onPeerEventHandler(peerId, event)
+ proc peerEventHandler(
+ peerId: PeerId, event: PeerEvent
+ ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
+ try:
+ await wf.onPeerEventHandler(peerId, event)
+ except CatchableError:
+ error "onPeerEventHandler failed",
+ remote_peer_id = shortLog(peerId),
+ event = event,
+ error = getCurrentExceptionMsg()
peerManager.addExtPeerEventHandler(peerEventHandler, PeerEventKind.Left)
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index 57a95e107..1165cbb52 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -114,16 +114,24 @@ proc handleRequest*(
return pushResponse
proc initProtocolHandler(wl: WakuLightPush) =
- proc handle(conn: Connection, proto: string) {.async.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var rpc: LightpushResponse
wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn):
- let buffer = await conn.readLp(DefaultMaxRpcSize)
+ var buffer: seq[byte]
+ try:
+ buffer = await conn.readLp(DefaultMaxRpcSize)
+ except LPStreamError:
+ error "lightpush read stream failed", error = getCurrentExceptionMsg()
+ return
waku_service_network_bytes.inc(
amount = buffer.len().int64, labelValues = [WakuLightPushCodec, "in"]
)
- rpc = await handleRequest(wl, conn.peerId, buffer)
+ try:
+ rpc = await handleRequest(wl, conn.peerId, buffer)
+ except CatchableError:
+ error "lightpush failed handleRequest", error = getCurrentExceptionMsg()
do:
debug "lightpush request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting
@@ -139,12 +147,15 @@ proc initProtocolHandler(wl: WakuLightPush) =
)
)
- await conn.writeLp(rpc.encode().buffer)
+ try:
+ await conn.writeLp(rpc.encode().buffer)
+ except LPStreamError:
+ error "lightpush write stream failed", error = getCurrentExceptionMsg()
## For lightpush might not worth to measure outgoing trafic as it is only
## small respones about success/failure
- wl.handler = handle
+ wl.handler = handler
wl.codec = WakuLightPushCodec
proc new*(
diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim
index 5de25ead9..feb6e17dc 100644
--- a/waku/waku_lightpush_legacy/protocol.nim
+++ b/waku/waku_lightpush_legacy/protocol.nim
@@ -64,16 +64,24 @@ proc handleRequest*(
return rpc
proc initProtocolHandler(wl: WakuLegacyLightPush) =
- proc handle(conn: Connection, proto: string) {.async.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var rpc: PushRPC
wl.requestRateLimiter.checkUsageLimit(WakuLegacyLightPushCodec, conn):
- let buffer = await conn.readLp(DefaultMaxRpcSize)
+ var buffer: seq[byte]
+ try:
+ buffer = await conn.readLp(DefaultMaxRpcSize)
+ except LPStreamError:
+ error "lightpush legacy read stream failed", error = getCurrentExceptionMsg()
+ return
waku_service_network_bytes.inc(
amount = buffer.len().int64, labelValues = [WakuLegacyLightPushCodec, "in"]
)
- rpc = await handleRequest(wl, conn.peerId, buffer)
+ try:
+ rpc = await handleRequest(wl, conn.peerId, buffer)
+ except CatchableError:
+ error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg()
do:
debug "lightpush request rejected due rate limit exceeded",
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting
@@ -89,12 +97,15 @@ proc initProtocolHandler(wl: WakuLegacyLightPush) =
)
)
- await conn.writeLp(rpc.encode().buffer)
+ try:
+ await conn.writeLp(rpc.encode().buffer)
+ except LPStreamError:
+ error "lightpush legacy write stream failed", error = getCurrentExceptionMsg()
## For lightpush might not worth to measure outgoing trafic as it is only
## small respones about success/failure
- wl.handler = handle
+ wl.handler = handler
wl.codec = WakuLegacyLightPushCodec
proc new*(
diff --git a/waku/waku_metadata/protocol.nim b/waku/waku_metadata/protocol.nim
index 8e4640ce7..13a2916b3 100644
--- a/waku/waku_metadata/protocol.nim
+++ b/waku/waku_metadata/protocol.nim
@@ -70,7 +70,11 @@ proc request*(
return ok(response)
proc initProtocolHandler(m: WakuMetadata) =
- proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+ defer:
+ # close, no data is expected
+ await conn.closeWithEof()
+
let res = catch:
await conn.readLp(RpcResponseMaxBytes)
let buffer = res.valueOr:
@@ -88,12 +92,13 @@ proc initProtocolHandler(m: WakuMetadata) =
localShards = m.shards,
peer = conn.peerId
- discard await m.respond(conn)
+ try:
+ discard await m.respond(conn)
+ except CatchableError:
+ error "Failed to respond to WakuMetadata request",
+ error = getCurrentExceptionMsg()
- # close, no data is expected
- await conn.closeWithEof()
-
- m.handler = handle
+ m.handler = handler
m.codec = WakuMetadataCodec
proc new*(
diff --git a/waku/waku_peer_exchange/protocol.nim b/waku/waku_peer_exchange/protocol.nim
index 2732cb1c1..14de77c67 100644
--- a/waku/waku_peer_exchange/protocol.nim
+++ b/waku/waku_peer_exchange/protocol.nim
@@ -243,7 +243,7 @@ proc updatePxEnrCache(wpx: WakuPeerExchange) {.async.} =
wpx.populateEnrCache()
proc initProtocolHandler(wpx: WakuPeerExchange) =
- proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var buffer: seq[byte]
wpx.requestRateLimiter.checkUsageLimit(WakuPeerExchangeCodec, conn):
try:
@@ -253,9 +253,13 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
waku_px_errors.inc(labelValues = [exc.msg])
(
- await wpx.respondError(
- PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn
- )
+ try:
+ await wpx.respondError(
+ PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn
+ )
+ except CatchableError:
+ error "could not send error response", error = getCurrentExceptionMsg()
+ return
).isOkOr:
error "Failed to respond with BAD_REQUEST:", error = $error
return
@@ -266,26 +270,41 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
error "Failed to decode PeerExchange request", error = $decBuf.error
(
- await wpx.respondError(
- PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
- )
+ try:
+ await wpx.respondError(
+ PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn
+ )
+ except CatchableError:
+ error "could not send error response decode",
+ error = getCurrentExceptionMsg()
+ return
).isOkOr:
error "Failed to respond with BAD_REQUEST:", error = $error
return
let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers)
debug "peer exchange request received", enrs = $enrs
- (await wpx.respond(enrs, conn)).isErrOr:
- waku_px_peers_sent.inc(enrs.len().int64())
+
+ try:
+ (await wpx.respond(enrs, conn)).isErrOr:
+ waku_px_peers_sent.inc(enrs.len().int64())
+ except CatchableError:
+ error "could not send response", error = getCurrentExceptionMsg()
do:
- (
- await wpx.respondError(
- PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn
- )
- ).isOkOr:
- error "Failed to respond with TOO_MANY_REQUESTS:", error = $error
- # close, no data is expected
- await conn.closeWithEof()
+ defer:
+ # close, no data is expected
+ await conn.closeWithEof()
+
+ try:
+ (
+ await wpx.respondError(
+ PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn
+ )
+ ).isOkOr:
+ error "Failed to respond with TOO_MANY_REQUESTS:", error = $error
+ except CatchableError:
+ error "could not send error response", error = getCurrentExceptionMsg()
+ return
wpx.handler = handler
wpx.codec = WakuPeerExchangeCodec
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index daaf056b7..8da3f89b5 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -144,7 +144,7 @@ type PublishOutcome* {.pure.} = enum
CannotGenerateMessageId
proc initProtocolHandler(w: WakuRelay) =
- proc handler(conn: Connection, proto: string) {.async.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/wakusub/0.0.1``, etc...
diff --git a/waku/waku_rendezvous/protocol.nim b/waku/waku_rendezvous/protocol.nim
index 9f1aa69cb..a26eaca6f 100644
--- a/waku/waku_rendezvous/protocol.nim
+++ b/waku/waku_rendezvous/protocol.nim
@@ -123,7 +123,7 @@ proc batchRequest*(
conn
let reqCatch = catch:
- await self.rendezvous.request(namespace, count, peers)
+ await self.rendezvous.request(Opt.some(namespace), count, peers)
for conn in conns:
await conn.close()
diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
index 600291ecf..54290a77a 100644
--- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -57,7 +57,7 @@ type
ethRpc*: Option[Web3]
wakuRlnContract*: Option[WakuRlnContractWithSender]
registrationTxHash*: Option[TxHash]
- chainId*: uint
+ chainId*: UInt256
keystorePath*: Option[string]
keystorePassword*: Option[string]
registrationHandler*: Option[RegistrationHandler]
@@ -239,8 +239,12 @@ method register*(
# TODO: make this robust. search within the event list for the event
debug "ts receipt", receipt = tsReceipt[]
- if tsReceipt.status.isNone() or tsReceipt.status.get() != 1.Quantity:
- raise newException(ValueError, "register: transaction failed")
+ if tsReceipt.status.isNone():
+ raise newException(ValueError, "register: transaction failed status is None")
+ if tsReceipt.status.get() != 1.Quantity:
+ raise newException(
+ ValueError, "register: transaction failed status is: " & $tsReceipt.status.get()
+ )
let firstTopic = tsReceipt.logs[0].topics[0]
# the hash of the signature of MemberRegistered(uint256,uint32) event is equal to the following hex value
@@ -485,9 +489,9 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
let ethRpc: Web3 = (await establishConnection(g)).valueOr:
return err("failed to connect to Ethereum clients: " & $error)
- var fetchedChainId: uint
+ var fetchedChainId: UInt256
g.retryWrapper(fetchedChainId, "Failed to get the chain id"):
- uint(await ethRpc.provider.eth_chainId())
+ await ethRpc.provider.eth_chainId()
# Set the chain id
if g.chainId == 0:
@@ -555,7 +559,7 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
warn "could not initialize with persisted rln metadata"
elif metadataGetOptRes.get().isSome():
let metadata = metadataGetOptRes.get().get()
- if metadata.chainId != uint(g.chainId):
+ if metadata.chainId != g.chainId:
return err("persisted data: chain id mismatch")
if metadata.contractAddress != g.ethContractAddress.toLower():
return err("persisted data: contract address mismatch")
diff --git a/waku/waku_rln_relay/protocol_types.nim b/waku/waku_rln_relay/protocol_types.nim
index c6f52e00b..867878886 100644
--- a/waku/waku_rln_relay/protocol_types.nim
+++ b/waku/waku_rln_relay/protocol_types.nim
@@ -131,6 +131,13 @@ proc encode*(nsp: RateLimitProof): ProtoBuffer =
output.finish3()
return output
+func encode*(x: UInt32): seq[byte] =
+ ## the Ethereum ABI imposes a 32 byte width for every type
+ let numTargetBytes = 32 div 8
+ let paddingBytes = 32 - numTargetBytes
+ let paddingZeros = newSeq[byte](paddingBytes)
+ paddingZeros & @(stint.toBytesBE(x))
+
type
SpamHandler* =
proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].}
diff --git a/waku/waku_rln_relay/rln/wrappers.nim b/waku/waku_rln_relay/rln/wrappers.nim
index 24682dda6..2e10c7e37 100644
--- a/waku/waku_rln_relay/rln/wrappers.nim
+++ b/waku/waku_rln_relay/rln/wrappers.nim
@@ -4,6 +4,7 @@ import
options,
eth/keys,
stew/[arrayops, byteutils, endians2],
+ stint,
results,
std/[sequtils, strutils, tables]
@@ -410,7 +411,7 @@ proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult =
type RlnMetadata* = object
lastProcessedBlock*: uint64
- chainId*: uint64
+ chainId*: UInt256
contractAddress*: string
validRoots*: seq[MerkleNode]
@@ -419,7 +420,7 @@ proc serialize(metadata: RlnMetadata): seq[byte] =
## returns the serialized metadata
return concat(
@(metadata.lastProcessedBlock.toBytes()),
- @(metadata.chainId.toBytes()),
+ @(metadata.chainId.toBytes(Endianness.littleEndian)[0 .. 7]),
@(hexToSeqByte(toLower(metadata.contractAddress))),
@(uint64(metadata.validRoots.len()).toBytes()),
@(serialize(metadata.validRoots)),
@@ -427,7 +428,7 @@ proc serialize(metadata: RlnMetadata): seq[byte] =
type MerkleNodeSeq = seq[MerkleNode]
-proc deserialize*(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T =
+proc deserialize(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T =
## deserializes a byte seq to a seq of MerkleNodes
## the order of serialization is |merkle_node_len<8>|merkle_node[len]|
@@ -489,7 +490,7 @@ proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[Option[RlnMetadata]] =
var
lastProcessedBlock: uint64
- chainId: uint64
+ chainId: UInt256
contractAddress: string
validRoots: MerkleNodeSeq
@@ -500,7 +501,9 @@ proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[Option[RlnMetadata]] =
lastProcessedBlock =
uint64.fromBytes(metadataBytes[lastProcessedBlockOffset .. chainIdOffset - 1])
- chainId = uint64.fromBytes(metadataBytes[chainIdOffset .. contractAddressOffset - 1])
+ chainId = UInt256.fromBytes(
+ metadataBytes[chainIdOffset .. contractAddressOffset - 1], Endianness.littleEndian
+ )
contractAddress =
byteutils.toHex(metadataBytes[contractAddressOffset .. validRootsOffset - 1])
let validRootsBytes = metadataBytes[validRootsOffset .. metadataBytes.high]
diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim
index 48b3e8d79..965c8c021 100644
--- a/waku/waku_rln_relay/rln_relay.nim
+++ b/waku/waku_rln_relay/rln_relay.nim
@@ -44,7 +44,7 @@ type RlnRelayConf* = object of RootObj
credIndex*: Option[uint]
ethContractAddress*: string
ethClientUrls*: seq[string]
- chainId*: uint
+ chainId*: UInt256
creds*: Option[RlnRelayCreds]
treePath*: string
epochSizeSec*: uint64
diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim
index aa22fe5cd..5e13c9a77 100644
--- a/waku/waku_store/protocol.nim
+++ b/waku/waku_store/protocol.nim
@@ -88,7 +88,7 @@ proc initProtocolHandler(self: WakuStore) =
statusDesc: $ErrorCode.TOO_MANY_REQUESTS,
).encode().buffer
- proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var successfulQuery = false ## only consider the correct queries in metrics
var resBuf: StoreResp
var queryDuration: float
@@ -106,7 +106,14 @@ proc initProtocolHandler(self: WakuStore) =
let queryStartTime = getTime().toUnixFloat()
- resBuf = await self.handleQueryRequest(conn.peerId, reqBuf)
+ try:
+ resBuf = await self.handleQueryRequest(conn.peerId, reqBuf)
+ except CatchableError:
+ error "store query failed in handler",
+ remote_peer_id = conn.peerId,
+ requestId = resBuf.requestId,
+ error = getCurrentExceptionMsg()
+ return
queryDuration = getTime().toUnixFloat() - queryStartTime
waku_store_time_seconds.set(queryDuration, ["query-db-time"])
diff --git a/waku/waku_store_legacy/protocol.nim b/waku/waku_store_legacy/protocol.nim
index d72308e63..79d0f03a1 100644
--- a/waku/waku_store_legacy/protocol.nim
+++ b/waku/waku_store_legacy/protocol.nim
@@ -110,7 +110,7 @@ proc initProtocolHandler(ws: WakuStore) =
),
).encode().buffer
- proc handler(conn: Connection, proto: string) {.async, closure.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var successfulQuery = false ## only consider the correct queries in metrics
var resBuf: StoreResp
var queryDuration: float
@@ -127,7 +127,13 @@ proc initProtocolHandler(ws: WakuStore) =
)
let queryStartTime = getTime().toUnixFloat()
- resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf)
+ try:
+ resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf)
+ except CatchableError:
+ error "legacy store query handler failed",
+ remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
+ return
+
queryDuration = getTime().toUnixFloat() - queryStartTime
waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"])
successfulQuery = true
diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim
index d9912a3df..cb5c1bc3d 100644
--- a/waku/waku_store_sync/reconciliation.nim
+++ b/waku/waku_store_sync/reconciliation.nim
@@ -322,11 +322,12 @@ proc new*(
remoteNeedsTx: remoteNeedsTx,
)
- let handler = proc(conn: Connection, proto: string) {.async, closure.} =
- (await sync.processRequest(conn)).isOkOr:
- error "request processing error", error = error
-
- return
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+ try:
+ (await sync.processRequest(conn)).isOkOr:
+ error "request processing error", error = error
+ except CatchableError:
+ error "exception in reconciliation handler", error = getCurrentExceptionMsg()
sync.handler = handler
sync.codec = WakuReconciliationCodec
diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim
index c1e5d3e37..78b83c601 100644
--- a/waku/waku_store_sync/transfer.nim
+++ b/waku/waku_store_sync/transfer.nim
@@ -130,7 +130,7 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
return
proc initProtocolHandler(self: SyncTransfer) =
- let handler = proc(conn: Connection, proto: string) {.async, closure.} =
+ proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
while true:
if not self.inSessions.contains(conn.peerId):
error "unwanted peer, disconnecting", remote = conn.peerId
@@ -156,10 +156,14 @@ proc initProtocolHandler(self: SyncTransfer) =
let hash = computeMessageHash(pubsub, msg)
- #TODO verify msg RLN proof...
-
- (await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr:
- error "failed to archive message", error = $error
+ try:
+ #TODO verify msg RLN proof...
+ (await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr:
+ error "failed to archive message", error = $error
+ continue
+ except CatchableError:
+ error "syncMessageIngress failed",
+ remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
continue
let id = SyncID(time: msg.timestamp, hash: hash)
From f47af16ffb43c0dd010d7ae10f1865582ea52cb5 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Tue, 27 May 2025 16:29:04 +0200
Subject: [PATCH 03/47] fix: build_rln.sh update version to download to v0.7.0
(#3425)
---
scripts/build_rln.sh | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/scripts/build_rln.sh b/scripts/build_rln.sh
index 1cf9b9879..cd2fa3827 100755
--- a/scripts/build_rln.sh
+++ b/scripts/build_rln.sh
@@ -19,15 +19,14 @@ host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}')
tarball="${host_triplet}"
-# use arkzkey feature for v0.5.1
+# use arkzkey feature for v0.7.0
# TODO: update this script in the future when arkzkey is default
-if [[ "${rln_version}" == "v0.5.1" ]]; then
+if [[ "${rln_version}" == "v0.7.0" ]]; then
tarball+="-arkzkey-rln.tar.gz"
else
tarball+="-rln.tar.gz"
fi
-
# Download the prebuilt rln library if it is available
if curl --silent --fail-with-body -L \
"https://github.com/vacp2p/zerokit/releases/download/$rln_version/$tarball" \
From 8812d66eb51f7e1f4635f2b398d5fedbd5a45f0b Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Wed, 28 May 2025 13:00:25 +0200
Subject: [PATCH 04/47] chore: CHANGELOG add lightpush v3 in v0.35.1 (#3427)
Co-authored-by: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
---
CHANGELOG.md | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 151392f1b..c7b0369b1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,10 @@
**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e.,
the one that is passed through this CLI: `--rln-relay-tree-path`.
+### Features
+* lightpush v3 ([#3279](https://github.com/waku-org/nwaku/pull/3279)) ([e0b563ff](https://github.com/waku-org/nwaku/commit/e0b563ffe5af20bd26d37cd9b4eb9ed9eb82ff80))
+ Upgrade for Waku Lightpush protocol with enhanced error handling. Read the specification [here](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md)
+
This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):
| Protocol | Spec status | Protocol id |
| ---: | :---: | :--- |
@@ -14,6 +18,7 @@ This release supports the following [libp2p protocols](https://docs.libp2p.io/co
| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` |
| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` |
| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` |
+| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` |
| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
From f0d668966dac38a15733be39bb83458992494387 Mon Sep 17 00:00:00 2001
From: Sasha <118575614+weboko@users.noreply.github.com>
Date: Wed, 28 May 2025 13:10:47 +0200
Subject: [PATCH 05/47] chore: suppress debug for js-waku (#3423)
---
.github/workflows/ci.yml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9d6a313e3..0ede43361 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -147,7 +147,6 @@ jobs:
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node
- debug: waku*
js-waku-node-optional:
needs: build-docker-image
@@ -155,7 +154,6 @@ jobs:
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node-optional
- debug: waku*
lint:
name: "Lint"
From 768b2785e1c4cdae64710b3e2288a80827f7c2af Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Wed, 28 May 2025 19:07:07 +0200
Subject: [PATCH 06/47] chore: heaptrack support build for Nim v2.0.12 builds
(#3424)
* fix heaptrack build for Nim v2.0.12 builds; fix Docker image creation and local image copying
* fix Dockerfile.bn.amd64 to support nwaku-compose
* Fix heaptrack image build with jenkins.release
* Fix NIM_COMMIT for heaptrack support in jenkins.release
* Remove leftover echo
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* Fix dockerfile naming
* Fix assignment of NIM_COMMIT in Makefile
---------
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
Dockerfile | 2 +-
Makefile | 12 ++++--
ci/Jenkinsfile.release | 38 ++++++++++++-----
docker/binaries/Dockerfile.bn.amd64 | 2 +-
docker/binaries/Dockerfile.bn.local | 63 +++++++++++++++++++++++++++++
5 files changed, 100 insertions(+), 17 deletions(-)
create mode 100644 docker/binaries/Dockerfile.bn.local
diff --git a/Dockerfile b/Dockerfile
index 8a1a743c9..b1f6b3c6a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -78,7 +78,7 @@ RUN make -j$(nproc)
# Debug image
-FROM prod AS debug
+FROM prod AS debug-with-heaptrack
RUN apk add --no-cache gdb libunwind
diff --git a/Makefile b/Makefile
index d15668673..ae57852a5 100644
--- a/Makefile
+++ b/Makefile
@@ -82,15 +82,18 @@ HEAPTRACKER ?= 0
HEAPTRACKER_INJECT ?= 0
ifeq ($(HEAPTRACKER), 1)
# Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch
-DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support
-TARGET := heaptrack-build
+DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support_v2.0.12
+TARGET := debug-with-heaptrack
+NIM_COMMIT := heaptrack_support_v2.0.12
ifeq ($(HEAPTRACKER_INJECT), 1)
# the Nim compiler will load 'libheaptrack_inject.so'
HEAPTRACK_PARAMS := -d:heaptracker -d:heaptracker_inject
+NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker -d:heaptracker_inject
else
# the Nim compiler will load 'libheaptrack_preload.so'
HEAPTRACK_PARAMS := -d:heaptracker
+NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker
endif
endif
@@ -209,6 +212,7 @@ testwaku: | build deps anvil librln
wakunode2: | build deps librln
echo -e $(BUILD_MSG) "build/$@" && \
+ \
$(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims
benchmarks: | build deps librln
@@ -343,12 +347,12 @@ docker-image:
docker-quick-image: MAKE_TARGET ?= wakunode2
docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
-docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
docker-quick-image: | build deps librln wakunode2
docker build \
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
--tag $(DOCKER_IMAGE_NAME) \
- --file docker/binaries/Dockerfile.bn.amd64 \
+ --target $(TARGET) \
+ --file docker/binaries/Dockerfile.bn.local \
.
docker-push:
diff --git a/ci/Jenkinsfile.release b/ci/Jenkinsfile.release
index 1a2125402..5d18d32aa 100644
--- a/ci/Jenkinsfile.release
+++ b/ci/Jenkinsfile.release
@@ -69,17 +69,33 @@ pipeline {
stages {
stage('Build') {
steps { script {
- image = docker.build(
- "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
- "--label=build='${env.BUILD_URL}' " +
- "--label=commit='${git.commit()}' " +
- "--label=version='${git.describe('--tags')}' " +
- "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
- "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
- "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
- "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
- "--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ."
- )
+ if (params.HEAPTRACK) {
+ echo 'Building with heaptrack support'
+ image = docker.build(
+ "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
+ "--label=build='${env.BUILD_URL}' " +
+ "--label=commit='${git.commit()}' " +
+ "--label=version='${git.describe('--tags')}' " +
+ "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
+ "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres -d:heaptracker ' " +
+ "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
+ "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
+ "--build-arg=NIM_COMMIT='NIM_COMMIT=heaptrack_support_v2.0.12' " +
+ "--target='debug-with-heaptrack' ."
+ )
+ } else {
+ image = docker.build(
+ "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
+ "--label=build='${env.BUILD_URL}' " +
+ "--label=commit='${git.commit()}' " +
+ "--label=version='${git.describe('--tags')}' " +
+ "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
+ "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
+ "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
+ "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
+ "--target='prod' ."
+ )
+ }
} }
}
diff --git a/docker/binaries/Dockerfile.bn.amd64 b/docker/binaries/Dockerfile.bn.amd64
index d32cf9342..c8dc0ffeb 100644
--- a/docker/binaries/Dockerfile.bn.amd64
+++ b/docker/binaries/Dockerfile.bn.amd64
@@ -13,7 +13,7 @@ EXPOSE 30303 60000 8545
# Referenced in the binary
RUN apt-get update &&\
- apt-get install -y libpcre3 libpq-dev curl iproute2 wget &&\
+ apt-get install -y libpcre3 libpq-dev curl iproute2 wget dnsutils &&\
apt-get clean && rm -rf /var/lib/apt/lists/*
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
diff --git a/docker/binaries/Dockerfile.bn.local b/docker/binaries/Dockerfile.bn.local
new file mode 100644
index 000000000..79445d14f
--- /dev/null
+++ b/docker/binaries/Dockerfile.bn.local
@@ -0,0 +1,63 @@
+# Dockerfile to build a distributable container image from pre-existing binaries
+# FROM debian:stable-slim AS prod
+FROM ubuntu:24.04 AS prod
+
+ARG MAKE_TARGET=wakunode2
+
+LABEL maintainer="vaclav@status.im"
+LABEL source="https://github.com/waku-org/nwaku"
+LABEL description="Wakunode: Waku client"
+LABEL commit="unknown"
+
+# DevP2P, LibP2P, and JSON RPC ports
+EXPOSE 30303 60000 8545
+
+# Referenced in the binary
+RUN apt-get update &&\
+ apt-get install -y libpcre3 libpq-dev curl iproute2 wget jq dnsutils &&\
+ apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
+RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
+
+# Copy to a separate location to accommodate different MAKE_TARGET values
+ADD ./build/$MAKE_TARGET /usr/local/bin/
+
+# Copy migration scripts for DB upgrades
+ADD ./migrations/ /app/migrations/
+
+# Symlink the correct wakunode binary
+RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode
+
+ENTRYPOINT ["/usr/bin/wakunode"]
+
+# By default just show help if called without arguments
+CMD ["--help"]
+
+# Build debug tools: heaptrack
+FROM ubuntu:24.04 AS heaptrack-build
+
+RUN apt update
+RUN apt install -y gdb git g++ make cmake zlib1g-dev libboost-all-dev libunwind-dev
+RUN git clone https://github.com/KDE/heaptrack.git /heaptrack
+
+WORKDIR /heaptrack/build
+# Pin to a commit that builds properly. We will revisit this for new releases.
+RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca
+RUN cmake -DCMAKE_BUILD_TYPE=Release ..
+RUN make -j$(nproc)
+
+
+# Debug image
+FROM prod AS debug-with-heaptrack
+
+RUN apt update
+RUN apt install -y gdb libunwind8
+
+# Add heaptrack
+COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/
+
+ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/
+RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack
+
+ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"]
From 5e22ea18b651e5d603fde4953a8d4f702c4e69c2 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Thu, 29 May 2025 12:05:48 +0200
Subject: [PATCH 07/47] chore: don't return error on double relay
subscription/unsubscription (#3429)
---
tests/waku_relay/test_wakunode_relay.nim | 10 +++++-----
waku/node/waku_node.nim | 13 ++++++-------
2 files changed, 11 insertions(+), 12 deletions(-)
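For illustration only (not from the diff below): a minimal Nim sketch of the behaviour this commit introduces, assuming an already-started WakuNode `node` and a WakuRelayHandler `handler` as placeholders. Repeated subscribe/unsubscribe calls for the same pubsub topic now log a warning and return ok() instead of an error.

    # Sketch only: `node` is an already-started WakuNode, `handler` a WakuRelayHandler.
    node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr:
      assert false, "first subscription should succeed: " & $error
    # Subscribing again to the same topic used to return err(...); it is now a
    # no-op that logs "No-effect API call to subscribe" and returns ok().
    node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr:
      assert false, "duplicate subscription should not error: " & $error
    node.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
      assert false, "unsubscribe should succeed: " & $error
    # Unsubscribing when no longer subscribed is likewise a warning plus ok().
    node.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
      assert false, "repeated unsubscribe should not error: " & $error
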
diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim
index c9c8d82ef..3f6bfd3e7 100644
--- a/tests/waku_relay/test_wakunode_relay.nim
+++ b/tests/waku_relay/test_wakunode_relay.nim
@@ -631,7 +631,7 @@ suite "WakuNode - Relay":
# Stop all nodes
await allFutures(nodes.mapIt(it.stop()))
- asyncTest "Only one subscription is allowed for contenttopics that generate the same shard":
+ asyncTest "Multiple subscription calls are allowed for contenttopics that generate the same shard":
## Setup
let
nodeKey = generateSecp256k1Key()
@@ -663,12 +663,12 @@ suite "WakuNode - Relay":
## When
node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
- node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isErrOr:
+ node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isOkOr:
assert false,
- "The subscription should fail because is already subscribe to that shard"
- node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isErrOr:
+ "The subscription call shouldn't error even though it's already subscribed to that shard"
+ node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isOkOr:
assert false,
- "The subscription should fail because is already subscribe to that shard"
+ "The subscription call shouldn't error even though it's already subscribed to that shard"
## Then
node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr:
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 152c7125d..1538d9096 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -323,12 +323,12 @@ proc subscribe*(
return err("Unsupported subscription type in relay subscribe")
if node.wakuRelay.isSubscribed(pubsubTopic):
- debug "already subscribed to topic", pubsubTopic
- return err("Already subscribed to topic: " & $pubsubTopic)
+ warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic
+ return ok()
if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()):
- error "Invalid API call to `subscribe`. Was already subscribed"
- return err("Invalid API call to `subscribe`. Was already subscribed")
+ warn "No-effect API call to `subscribe`. Was already subscribed"
+ return ok()
node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic))
node.registerRelayDefaultHandler(pubsubTopic)
@@ -364,9 +364,8 @@ proc unsubscribe*(
return err("Unsupported subscription type in relay unsubscribe")
if not node.wakuRelay.isSubscribed(pubsubTopic):
- error "Invalid API call to `unsubscribe`. Was not subscribed", pubsubTopic
- return
- err("Invalid API call to `unsubscribe`. Was not subscribed to: " & $pubsubTopic)
+ warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic
+ return ok()
if contentTopicOp.isSome():
# Remove this handler only
From 94cd2f88b4425c6fd37c6079a537c3a19c100f05 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Fri, 30 May 2025 17:47:06 +0200
Subject: [PATCH 08/47] chore: exposing online state in libwaku (#3433)
---
library/libwaku.h | 4 ++++
library/libwaku.nim | 14 ++++++++++++++
.../requests/peer_manager_request.nim | 3 +++
3 files changed, 21 insertions(+)
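For illustration (the helper name is hypothetical, not part of libwaku): the IS_ONLINE request resolves to ok($waku.node.peerManager.isOnline()), so the callback registered through waku_is_online receives the stringified boolean. A minimal Nim sketch of interpreting that reply:

    # Hedged sketch: `onIsOnlineReply` is a hypothetical helper, not part of the patch.
    proc onIsOnlineReply(reply: string): bool =
      # The peer-manager handler stringifies a Nim bool, i.e. "true" or "false".
      reply == "true"

    assert onIsOnlineReply("true")
    assert not onIsOnlineReply("false")
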
diff --git a/library/libwaku.h b/library/libwaku.h
index 3c15b36f9..7a16c9d65 100644
--- a/library/libwaku.h
+++ b/library/libwaku.h
@@ -232,6 +232,10 @@ int waku_ping_peer(void* ctx,
WakuCallBack callback,
void* userData);
+int waku_is_online(void* ctx,
+ WakuCallBack callback,
+ void* userData);
+
#ifdef __cplusplus
}
#endif
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 48df3e2c6..5d8252225 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -842,5 +842,19 @@ proc waku_ping_peer(
userData,
)
+proc waku_is_online(
+ ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
+
+ handleRequest(
+ ctx,
+ RequestType.PEER_MANAGER,
+ PeerManagementRequest.createShared(PeerManagementMsgType.IS_ONLINE),
+ callback,
+ userData,
+ )
+
### End of exported procs
################################################################################
diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
index 1e5202891..e68e66afe 100644
--- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
@@ -15,6 +15,7 @@ type PeerManagementMsgType* {.pure.} = enum
DIAL_PEER
DIAL_PEER_BY_ID
GET_CONNECTED_PEERS
+ IS_ONLINE
type PeerManagementRequest* = object
operation: PeerManagementMsgType
@@ -144,5 +145,7 @@ proc process*(
(inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
+ of IS_ONLINE:
+ return ok($waku.node.peerManager.isOnline())
return ok("")
From a39bcff6dc38812df72ee4b3a2f34f7091be0e53 Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Mon, 2 Jun 2025 17:21:09 +0200
Subject: [PATCH 09/47] feat: Extend node /health REST endpoint with all
 protocols' state (#3419)
* Extend node /health REST endpoint with all protocols' state
* Added check for Rendezvous peers availability
* Fine tune filter, added client protocols to health report
* Fix /health endpoint test
* Add explanatory description for state NOT_READY
* Fix formatting
* Apply suggestions from code review
Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
* Apply code style changes and extended test
* Fix formatting
---------
Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
---
tests/wakunode_rest/test_rest_health.nim | 41 +++-
waku/node/health_monitor.nim | 250 +++++++++++++++++++++--
waku/waku_api/rest/health/types.nim | 34 +--
3 files changed, 288 insertions(+), 37 deletions(-)
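For illustration (protocol name, field and codec are placeholders, not from the patch): the health_monitor.nim changes below introduce per-protocol getters built from ProtocolHealth.init plus the ready/notReady/notMounted helpers; a new check would follow the same shape.

    proc getExampleHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
      # "Example", wakuExample and WakuExampleCodec are hypothetical; the real
      # checks (Relay, Rln Relay, Store, Filter, ...) are defined in the diff below.
      var p = ProtocolHealth.init("Example")
      if hm.node.get().wakuExample == nil:
        return p.notMounted()
      if not hm.node.get().peerManager.selectPeer(WakuExampleCodec).isSome():
        return p.notReady("No Example service peer available yet")
      return p.ready()
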
diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim
index 1f6c6314f..93838b4fe 100644
--- a/tests/wakunode_rest/test_rest_health.nim
+++ b/tests/wakunode_rest/test_rest_health.nim
@@ -74,6 +74,10 @@ suite "Waku v2 REST API - health":
treePath: genTempPath("rln_tree", "wakunode"),
)
)
+
+ node.mountLightPushClient()
+ await node.mountFilterClient()
+
healthMonitor.setNode(node)
healthMonitor.setOverallHealth(HealthStatus.READY)
# When
@@ -84,9 +88,40 @@ suite "Waku v2 REST API - health":
response.status == 200
$response.contentType == $MIMETYPE_JSON
response.data.nodeHealth == HealthStatus.READY
- response.data.protocolsHealth.len() == 1
- response.data.protocolsHealth[0].protocol == "Rln Relay"
- response.data.protocolsHealth[0].health == HealthStatus.READY
+ response.data.protocolsHealth.len() == 14
+ response.data.protocolsHealth[0].protocol == "Relay"
+ response.data.protocolsHealth[0].health == HealthStatus.NOT_READY
+ response.data.protocolsHealth[0].desc == some("No connected peers")
+ response.data.protocolsHealth[1].protocol == "Rln Relay"
+ response.data.protocolsHealth[1].health == HealthStatus.READY
+ response.data.protocolsHealth[2].protocol == "Lightpush"
+ response.data.protocolsHealth[2].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[3].protocol == "Legacy Lightpush"
+ response.data.protocolsHealth[3].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[4].protocol == "Filter"
+ response.data.protocolsHealth[4].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[5].protocol == "Store"
+ response.data.protocolsHealth[5].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[6].protocol == "Legacy Store"
+ response.data.protocolsHealth[6].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[7].protocol == "Peer Exchange"
+ response.data.protocolsHealth[7].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[8].protocol == "Rendezvous"
+ response.data.protocolsHealth[8].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[9].protocol == "Lightpush Client"
+ response.data.protocolsHealth[9].health == HealthStatus.NOT_READY
+ response.data.protocolsHealth[9].desc ==
+ some("No Lightpush service peer available yet")
+ response.data.protocolsHealth[10].protocol == "Legacy Lightpush Client"
+ response.data.protocolsHealth[10].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[11].protocol == "Store Client"
+ response.data.protocolsHealth[11].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[12].protocol == "Legacy Store Client"
+ response.data.protocolsHealth[12].health == HealthStatus.NOT_MOUNTED
+ response.data.protocolsHealth[13].protocol == "Filter Client"
+ response.data.protocolsHealth[13].health == HealthStatus.NOT_READY
+ response.data.protocolsHealth[13].desc ==
+ some("No Filter service peer available yet")
await restServer.stop()
await restServer.closeWait()
diff --git a/waku/node/health_monitor.nim b/waku/node/health_monitor.nim
index b3fe9b227..aa9082ec6 100644
--- a/waku/node/health_monitor.nim
+++ b/waku/node/health_monitor.nim
@@ -1,8 +1,8 @@
{.push raises: [].}
-import std/[options], chronos
+import std/[options, sets], chronos, libp2p/protocols/rendezvous
-import waku_node, ../waku_rln_relay
+import waku_node, ../waku_rln_relay, ../waku_relay, ./peer_manager
type
HealthStatus* = enum
@@ -16,6 +16,7 @@ type
ProtocolHealth* = object
protocol*: string
health*: HealthStatus
+ desc*: Option[string] ## describes why a certain protocol is considered `NOT_READY`
HealthReport* = object
nodeHealth*: HealthStatus
@@ -54,31 +55,236 @@ proc init*(
else:
raise newException(ValueError, "Invalid HealthStatus string representation")
+proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth =
+ let p = ProtocolHealth(
+ protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]()
+ )
+ return p
+
+proc notReady(p: var ProtocolHealth, desc: string): ProtocolHealth =
+ p.health = HealthStatus.NOT_READY
+ p.desc = some(desc)
+ return p
+
+proc ready(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.READY
+ p.desc = none[string]()
+ return p
+
+proc notMounted(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.NOT_MOUNTED
+ p.desc = none[string]()
+ return p
+
+proc synchronizing(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.SYNCHRONIZING
+ p.desc = none[string]()
+ return p
+
+proc initializing(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.INITIALIZING
+ p.desc = none[string]()
+ return p
+
+proc shuttingDown(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.SHUTTING_DOWN
+ p.desc = none[string]()
+ return p
+
const FutIsReadyTimout = 5.seconds
+proc getRelayHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Relay")
+ if hm.node.get().wakuRelay == nil:
+ return p.notMounted()
+
+ let relayPeers = hm.node
+ .get().wakuRelay
+ .getConnectedPubSubPeers(pubsubTopic = "").valueOr:
+ return p.notMounted()
+
+ if relayPeers.len() == 0:
+ return p.notReady("No connected peers")
+
+ return p.ready()
+
+proc getRlnRelayHealth(hm: WakuNodeHealthMonitor): Future[ProtocolHealth] {.async.} =
+ var p = ProtocolHealth.init("Rln Relay")
+ if hm.node.get().wakuRlnRelay.isNil():
+ return p.notMounted()
+
+ let isReadyStateFut = hm.node.get().wakuRlnRelay.isReady()
+ if not await isReadyStateFut.withTimeout(FutIsReadyTimout):
+ return p.notReady("Ready state check timed out")
+
+ try:
+ if not isReadyStateFut.completed():
+ return p.notReady("Ready state check timed out")
+ elif isReadyStateFut.read():
+ return p.ready()
+
+ return p.synchronizing()
+ except:
+ error "exception reading state: " & getCurrentExceptionMsg()
+ return p.notReady("State cannot be determined")
+
+proc getLightpushHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Lightpush")
+ if hm.node.get().wakuLightPush == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Node has no relay peers to fulfill push requests")
+
+proc getLightpushClientHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Lightpush Client")
+ if hm.node.get().wakuLightpushClient == nil:
+ return p.notMounted()
+
+ let selfServiceAvailable =
+ hm.node.get().wakuLightPush != nil and relayHealth == HealthStatus.READY
+ let servicePeerAvailable =
+ hm.node.get().peerManager.selectPeer(WakuLightPushCodec).isSome()
+
+ if selfServiceAvailable or servicePeerAvailable:
+ return p.ready()
+
+ return p.notReady("No Lightpush service peer available yet")
+
+proc getLegacyLightpushHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Lightpush")
+ if hm.node.get().wakuLegacyLightPush == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Node has no relay peers to fulfill push requests")
+
+proc getLegacyLightpushClientHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Lightpush Client")
+ if hm.node.get().wakuLegacyLightpushClient == nil:
+ return p.notMounted()
+
+ if (hm.node.get().wakuLegacyLightPush != nil and relayHealth == HealthStatus.READY) or
+ hm.node.get().peerManager.selectPeer(WakuLegacyLightPushCodec).isSome():
+ return p.ready()
+
+ return p.notReady("No Lightpush service peer available yet")
+
+proc getFilterHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Filter")
+ if hm.node.get().wakuFilter == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Relay is not ready, so filter will not be able to sort out messages")
+
+proc getFilterClientHealth(
+ hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Filter Client")
+ if hm.node.get().wakuFilterClient == nil:
+ return p.notMounted()
+
+ if hm.node.get().peerManager.selectPeer(WakuFilterSubscribeCodec).isSome():
+ return p.ready()
+
+ return p.notReady("No Filter service peer available yet")
+
+proc getStoreHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Store")
+ if hm.node.get().wakuStore == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getStoreClientHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Store Client")
+ if hm.node.get().wakuStoreClient == nil:
+ return p.notMounted()
+
+ if hm.node.get().peerManager.selectPeer(WakuStoreCodec).isSome() or
+ hm.node.get().wakuStore != nil:
+ return p.ready()
+
+ return p.notReady(
+ "No Store service peer available yet, neither Store service set up for the node"
+ )
+
+proc getLegacyStoreHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Store")
+ if hm.node.get().wakuLegacyStore == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getLegacyStoreClientHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Store Client")
+ if hm.node.get().wakuLegacyStoreClient == nil:
+ return p.notMounted()
+
+ if hm.node.get().peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or
+ hm.node.get().wakuLegacyStore != nil:
+ return p.ready()
+
+ return p.notReady(
+ "No Legacy Store service peers are available yet, neither Store service set up for the node"
+ )
+
+proc getPeerExchangeHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Peer Exchange")
+ if hm.node.get().wakuPeerExchange == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getRendezvousHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Rendezvous")
+ if hm.node.get().wakuRendezvous == nil:
+ return p.notMounted()
+
+ if hm.node.get().peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0:
+ return p.notReady("No Rendezvous peers are available yet")
+
+ return p.ready()
+
proc getNodeHealthReport*(hm: WakuNodeHealthMonitor): Future[HealthReport] {.async.} =
- result.nodeHealth = hm.nodeHealth
+ var report: HealthReport
+ report.nodeHealth = hm.nodeHealth
- if hm.node.isSome() and hm.node.get().wakuRlnRelay != nil:
- let getRlnRelayHealth = proc(): Future[HealthStatus] {.async.} =
- let isReadyStateFut = hm.node.get().wakuRlnRelay.isReady()
- if not await isReadyStateFut.withTimeout(FutIsReadyTimout):
- return HealthStatus.NOT_READY
+ if hm.node.isSome():
+ let relayHealth = hm.getRelayHealth()
+ report.protocolsHealth.add(relayHealth)
+ report.protocolsHealth.add(await hm.getRlnRelayHealth())
+ report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getStoreHealth())
+ report.protocolsHealth.add(hm.getLegacyStoreHealth())
+ report.protocolsHealth.add(hm.getPeerExchangeHealth())
+ report.protocolsHealth.add(hm.getRendezvousHealth())
- try:
- if not isReadyStateFut.completed():
- return HealthStatus.NOT_READY
- elif isReadyStateFut.read():
- return HealthStatus.READY
-
- return HealthStatus.SYNCHRONIZING
- except:
- error "exception reading state: " & getCurrentExceptionMsg()
- return HealthStatus.NOT_READY
-
- result.protocolsHealth.add(
- ProtocolHealth(protocol: "Rln Relay", health: await getRlnRelayHealth())
- )
+ report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getStoreClientHealth())
+ report.protocolsHealth.add(hm.getLegacyStoreClientHealth())
+ report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health))
+ return report
proc setNode*(hm: WakuNodeHealthMonitor, node: WakuNode) =
hm.node = some(node)
diff --git a/waku/waku_api/rest/health/types.nim b/waku/waku_api/rest/health/types.nim
index ce58ab711..e457ebea5 100644
--- a/waku/waku_api/rest/health/types.nim
+++ b/waku/waku_api/rest/health/types.nim
@@ -10,26 +10,36 @@ proc writeValue*(
) {.raises: [IOError].} =
writer.beginRecord()
writer.writeField(value.protocol, $value.health)
+ writer.writeField("desc", value.desc)
writer.endRecord()
proc readValue*(
reader: var JsonReader[RestJson], value: var ProtocolHealth
) {.gcsafe, raises: [SerializationError, IOError].} =
- var health: HealthStatus
- var fieldCount = 0
-
+ var protocol = none[string]()
+ var health = none[HealthStatus]()
+ var desc = none[string]()
for fieldName in readObjectFields(reader):
- if fieldCount > 0:
- reader.raiseUnexpectedField("Too many fields", "ProtocolHealth")
- fieldCount += 1
+ if fieldName == "desc":
+ if desc.isSome():
+ reader.raiseUnexpectedField("Multiple `desc` fields found", "ProtocolHealth")
+ desc = some(reader.readValue(string))
+ else:
+ if protocol.isSome():
+ reader.raiseUnexpectedField(
+ "Multiple `protocol` fields and value found", "ProtocolHealth"
+ )
- let fieldValue = reader.readValue(string)
- try:
- health = HealthStatus.init(fieldValue)
- except ValueError:
- reader.raiseUnexpectedValue("Invalid `health` value")
+ let fieldValue = reader.readValue(string)
+ try:
+ health = some(HealthStatus.init(fieldValue))
+ protocol = some(fieldName)
+ except ValueError:
+ reader.raiseUnexpectedValue(
+ "Invalid `health` value: " & getCurrentExceptionMsg()
+ )
- value = ProtocolHealth(protocol: fieldName, health: health)
+ value = ProtocolHealth(protocol: protocol.get(), health: health.get(), desc: desc)
proc writeValue*(
writer: var JsonWriter[RestJson], value: HealthReport
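For illustration (values taken from the extended REST test above, serialized form is a sketch): the writeValue/readValue pair above encodes each ProtocolHealth keyed by its protocol name, with an optional "desc" field, roughly { "Relay": "NOT_READY", "desc": "No connected peers" }. How an absent desc is rendered depends on json_serialization's Option handling.

    import std/options
    # Assuming ProtocolHealth and HealthStatus from waku/node/health_monitor.
    let relayEntry = ProtocolHealth(
      protocol: "Relay",
      health: HealthStatus.NOT_READY,
      desc: some("No connected peers"),
    )
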
From 336fbf8b64d683fa6eb829462598e22b59ac734f Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Mon, 2 Jun 2025 22:02:49 +0200
Subject: [PATCH 10/47] fix: relay unsubscribe (#3422)
* waku_relay protocol fix unsubscribe and remove topic validator
* simplify subscription and avoid unneeded code
* tests adaptations
* call wakuRelay.subscribe only in one place within waku_node
---
apps/chat2/chat2.nim | 4 +-
apps/chat2bridge/chat2bridge.nim | 2 +-
apps/networkmonitor/networkmonitor.nim | 2 +-
examples/subscriber.nim | 2 +-
.../requests/protocols/relay_request.nim | 2 +-
tests/node/test_wakunode_legacy_lightpush.nim | 6 +-
tests/node/test_wakunode_lightpush.nim | 6 +-
tests/test_relay_peer_exchange.nim | 23 +++-
tests/test_wakunode.nim | 6 +-
tests/waku_relay/test_protocol.nim | 80 +++++++------
tests/waku_relay/test_wakunode_relay.nim | 94 +++++++++------
tests/waku_relay/utils.nim | 4 +-
.../test_wakunode_rln_relay.nim | 107 +++++++++++-------
tests/wakunode2/test_validators.nim | 11 +-
tests/wakunode_rest/test_rest_admin.nim | 21 +++-
tests/wakunode_rest/test_rest_filter.nim | 28 ++++-
tests/wakunode_rest/test_rest_lightpush.nim | 23 +++-
.../test_rest_lightpush_legacy.nim | 22 +++-
tests/wakunode_rest/test_rest_relay.nim | 40 ++++++-
waku/factory/node_factory.nim | 5 +-
waku/factory/waku.nim | 10 +-
waku/node/waku_node.nim | 54 +++------
waku/waku_api/rest/builder.nim | 12 +-
waku/waku_api/rest/relay/handlers.nim | 6 +-
waku/waku_relay/protocol.nim | 33 +++---
25 files changed, 366 insertions(+), 237 deletions(-)
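For illustration (placeholders `node`, `handler`, `shard` and `pubsubTopic`; not part of the patch): this commit changes WakuNode.subscribe to take the relay handler directly instead of an Option, and WakuRelay.subscribe/unsubscribe no longer return or take a TopicHandler.

    # Before this commit:
    #   node.subscribe((kind: PubsubSub, topic: $shard), some(WakuRelayHandler(handler)))
    #   let th = wakuRelay.subscribe(pubsubTopic, handler)
    #   wakuRelay.unsubscribe(pubsubTopic, th)
    # After this commit:
    node.subscribe((kind: PubsubSub, topic: $shard), WakuRelayHandler(handler)).isOkOr:
      error "failed to subscribe to pubsub topic", topic = $shard, error = error
    wakuRelay.subscribe(pubsubTopic, handler)   # no TopicHandler returned anymore
    wakuRelay.unsubscribe(pubsubTopic)          # handler argument dropped
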
diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim
index 9c0f47dcd..1ba599d78 100644
--- a/apps/chat2/chat2.nim
+++ b/apps/chat2/chat2.nim
@@ -380,7 +380,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
if conf.relay:
let shards =
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
- (await node.mountRelay(shards)).isOkOr:
+ (await node.mountRelay()).isOkOr:
echo "failed to mount relay: " & error
return
@@ -535,7 +535,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
chat.printReceivedMessage(msg)
node.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler))
+ (kind: PubsubSub, topic: DefaultPubsubTopic), WakuRelayHandler(handler)
).isOkOr:
error "failed to subscribe to pubsub topic",
topic = DefaultPubsubTopic, error = error
diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim
index 7a7a5d08f..a62d98261 100644
--- a/apps/chat2bridge/chat2bridge.nim
+++ b/apps/chat2bridge/chat2bridge.nim
@@ -232,7 +232,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
except:
error "exception in relayHandler: " & getCurrentExceptionMsg()
- cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error
return
diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim
index 7b71a630e..f8cde5281 100644
--- a/apps/networkmonitor/networkmonitor.nim
+++ b/apps/networkmonitor/networkmonitor.nim
@@ -554,7 +554,7 @@ proc subscribeAndHandleMessages(
else:
msgPerContentTopic[msg.contentTopic] = 1
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr:
error "failed to subscribe to pubsub topic", pubsubTopic, error
quit(1)
diff --git a/examples/subscriber.nim b/examples/subscriber.nim
index 7eb900792..fb040b05a 100644
--- a/examples/subscriber.nim
+++ b/examples/subscriber.nim
@@ -119,7 +119,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
contentTopic = msg.contentTopic,
timestamp = msg.timestamp
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr:
error "failed to subscribe to pubsub topic", pubsubTopic, error
quit(1)
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
index c2f002c44..cfff1442c 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
@@ -111,7 +111,7 @@ proc process*(
of SUBSCRIBE:
waku.node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
- handler = some(self.relayEventCallback),
+ handler = self.relayEventCallback,
).isOkOr:
error "SUBSCRIBE failed", error
return err($error)
diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim
index 806bfe032..5d01e9f58 100644
--- a/tests/node/test_wakunode_legacy_lightpush.nim
+++ b/tests/node/test_wakunode_legacy_lightpush.nim
@@ -189,9 +189,9 @@ suite "Waku Legacy Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
- (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await destNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
- (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush()
lightNode.mountLegacyLightPushClient()
@@ -214,7 +214,7 @@ suite "Waku Legacy Lightpush message delivery":
msg == message
completionFutRelay.complete(true)
- destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
+ destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic:" & $error
# Wait for subscription to take effect
diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim
index dccb899af..6a42c899b 100644
--- a/tests/node/test_wakunode_lightpush.nim
+++ b/tests/node/test_wakunode_lightpush.nim
@@ -183,9 +183,9 @@ suite "Waku Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
- (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await destNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
- (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLightPush()
lightNode.mountLightPushClient()
@@ -209,7 +209,7 @@ suite "Waku Lightpush message delivery":
msg == message
completionFutRelay.complete(true)
- destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
+ destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to relay"
# Wait for subscription to take effect
diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim
index a5e3b63ee..84976bd9a 100644
--- a/tests/test_relay_peer_exchange.nim
+++ b/tests/test_relay_peer_exchange.nim
@@ -22,9 +22,9 @@ procSuite "Relay (GossipSub) Peer Exchange":
newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true)
# When both client and server mount relay without a handler
- (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
- (await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr:
+ (await node2.mountRelay(none(RoutingRecordsHandler))).isOkOr:
assert false, "Failed to mount relay"
# Then the relays are mounted without a handler
@@ -74,11 +74,11 @@ procSuite "Relay (GossipSub) Peer Exchange":
peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler
# Given the nodes mount relay with a peer exchange handler
- (await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
+ (await node1.mountRelay(some(emptyPeerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
- (await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
+ (await node2.mountRelay(some(emptyPeerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
- (await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr:
+ (await node3.mountRelay(some(peerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
# Ensure that node1 prunes all peers after the first connection
@@ -86,6 +86,19 @@ procSuite "Relay (GossipSub) Peer Exchange":
await allFutures([node1.start(), node2.start(), node3.start()])
+ # The three nodes should be subscribed to the same shard
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node1.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node2.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node3.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+
# When nodes are connected
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
await node3.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])
diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim
index e50f3fe98..a7f1084fb 100644
--- a/tests/test_wakunode.nim
+++ b/tests/test_wakunode.nim
@@ -34,14 +34,14 @@ suite "WakuNode":
# Setup node 1 with stable codec "/vac/waku/relay/2.0.0"
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
node1.wakuRelay.codec = "/vac/waku/relay/2.0.0"
# Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2"
@@ -69,7 +69,7 @@ suite "WakuNode":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node2.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic"
await sleepAsync(2000.millis)
diff --git a/tests/waku_relay/test_protocol.nim b/tests/waku_relay/test_protocol.nim
index bc2097caa..46032b693 100644
--- a/tests/waku_relay/test_protocol.nim
+++ b/tests/waku_relay/test_protocol.nim
@@ -77,7 +77,8 @@ suite "Waku Relay":
asyncTest "Publish with Subscription (Network Size: 1)":
# When subscribing to a Pubsub Topic
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+
+ node.subscribe(pubsubTopic, simpleFutureHandler)
# Then the node is subscribed
check:
@@ -111,7 +112,7 @@ suite "Waku Relay":
otherHandlerFuture.complete((topic, message))
# When subscribing the second node to the Pubsub Topic
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
# Then the second node is subscribed, but not the first one
check:
@@ -172,8 +173,8 @@ suite "Waku Relay":
otherHandlerFuture.complete((topic, message))
# When subscribing both nodes to the same Pubsub Topic
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
# Then both nodes are subscribed
check:
@@ -228,7 +229,7 @@ suite "Waku Relay":
asyncTest "Refreshing subscription":
# Given a subscribed node
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check:
node.isSubscribed(pubsubTopic)
node.subscribedTopics == pubsubTopicSeq
@@ -244,7 +245,7 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard node.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, otherSimpleFutureHandler)
check:
node.isSubscribed(pubsubTopic)
node.subscribedTopics == pubsubTopicSeq
@@ -291,14 +292,14 @@ suite "Waku Relay":
otherHandlerFuture.complete((topic, message))
otherNode.addValidator(len4Validator)
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
await sleepAsync(500.millis)
check:
otherNode.isSubscribed(pubsubTopic)
# Given a subscribed node with a validator
node.addValidator(len4Validator)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
await sleepAsync(500.millis)
check:
node.isSubscribed(pubsubTopic)
@@ -380,8 +381,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
check:
node.isSubscribed(pubsubTopic)
node.subscribedTopics == pubsubTopicSeq
@@ -464,8 +465,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
handlerFuture2.complete((topic, message))
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
- discard node.subscribe(pubsubTopicB, simpleFutureHandler2)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopicB, simpleFutureHandler2)
# Given the other nodes are subscribed to two pubsub topics
var otherHandlerFuture1 = newPushHandlerFuture()
@@ -492,10 +493,10 @@ suite "Waku Relay":
) {.async, gcsafe.} =
anotherHandlerFuture2.complete((topic, message))
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1)
- discard otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2)
- discard anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1)
- discard anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1)
+ otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2)
+ anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1)
+ anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2)
await sleepAsync(500.millis)
# When publishing a message in node for each of the pubsub topics
@@ -735,15 +736,13 @@ suite "Waku Relay":
otherSwitch = newTestSwitch()
otherNode = await newTestWakuRelay(otherSwitch)
await allFutures(otherSwitch.start(), otherNode.start())
- let otherTopicHandler: TopicHandler =
- otherNode.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, simpleFutureHandler)
# Given a node without a subscription
check:
node.subscribedTopics == []
- # When unsubscribing from a pubsub topic from an unsubscribed topic handler
- node.unsubscribe(pubsubTopic, otherTopicHandler)
+ node.unsubscribe(pubsubTopic)
# Then the node is still not subscribed
check:
@@ -754,11 +753,11 @@ suite "Waku Relay":
asyncTest "Single Node with Single Pubsub Topic":
# Given a node subscribed to a pubsub topic
- let topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check node.subscribedTopics == pubsubTopicSeq
# When unsubscribing from the pubsub topic
- node.unsubscribe(pubsubTopic, topicHandler)
+ node.unsubscribe(pubsubTopic)
# Then the node is not subscribed anymore
check node.subscribedTopics == []
@@ -768,9 +767,8 @@ suite "Waku Relay":
let pubsubTopicB = "/waku/2/rs/0/1"
# Given a node subscribed to multiple pubsub topics
- let
- topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler)
- topicHandlerB = node.subscribe(pubsubTopicB, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopicB, simpleFutureHandler)
assert pubsubTopic in node.subscribedTopics,
fmt"Node is not subscribed to {pubsubTopic}"
@@ -778,13 +776,13 @@ suite "Waku Relay":
fmt"Node is not subscribed to {pubsubTopicB}"
# When unsubscribing from one of the pubsub topics
- node.unsubscribe(pubsubTopic, topicHandler)
+ node.unsubscribe(pubsubTopic)
# Then the node is still subscribed to the other pubsub topic
check node.subscribedTopics == @[pubsubTopicB]
# When unsubscribing from the other pubsub topic
- node.unsubscribe(pubsubTopicB, topicHandlerB)
+ node.unsubscribe(pubsubTopicB)
# Then the node is not subscribed anymore
check node.subscribedTopics == []
@@ -802,7 +800,7 @@ suite "Waku Relay":
asyncTest "Single Node with Single Pubsub Topic":
# Given a node subscribed to a pubsub topic
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check node.subscribedTopics == pubsubTopicSeq
# When unsubscribing from all pubsub topics
@@ -816,9 +814,9 @@ suite "Waku Relay":
let pubsubTopicB = "/waku/2/rs/0/1"
# Given a node subscribed to multiple pubsub topics
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
- discard node.subscribe(pubsubTopicB, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
+ node.subscribe(pubsubTopicB, simpleFutureHandler)
assert pubsubTopic in node.subscribedTopics,
fmt"Node is not subscribed to {pubsubTopic}"
@@ -855,8 +853,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check:
node.subscribedTopics == pubsubTopicSeq
otherNode.subscribedTopics == pubsubTopicSeq
@@ -1021,8 +1019,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check:
node.subscribedTopics == pubsubTopicSeq
otherNode.subscribedTopics == pubsubTopicSeq
@@ -1163,8 +1161,8 @@ suite "Waku Relay":
otherMessageSeq.add((topic, message))
otherHandlerFuture.complete((topic, message))
- discard node.subscribe(pubsubTopic, thisSimpleFutureHandler)
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, thisSimpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
check:
node.subscribedTopics == pubsubTopicSeq
otherNode.subscribedTopics == pubsubTopicSeq
@@ -1237,8 +1235,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check:
node.subscribedTopics == pubsubTopicSeq
otherNode.subscribedTopics == pubsubTopicSeq
@@ -1332,8 +1330,8 @@ suite "Waku Relay":
) {.async, gcsafe.} =
otherHandlerFuture.complete((topic, message))
- discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
- discard node.subscribe(pubsubTopic, simpleFutureHandler)
+ otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
+ node.subscribe(pubsubTopic, simpleFutureHandler)
check:
node.subscribedTopics == pubsubTopicSeq
otherNode.subscribedTopics == pubsubTopicSeq
diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim
index 3f6bfd3e7..ad8d83361 100644
--- a/tests/waku_relay/test_wakunode_relay.nim
+++ b/tests/waku_relay/test_wakunode_relay.nim
@@ -70,15 +70,15 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.start()
- (await node3.mountRelay(@[shard])).isOkOr:
+ (await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await allFutures(
@@ -97,13 +97,19 @@ suite "WakuNode - Relay":
msg.timestamp > 0
completionFut.complete(true)
- ## The following unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
- assert false, "Failed to unsubscribe from topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ ## node1 and node2 explicitly subscribe to the same shard as node3
+ node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -147,15 +153,15 @@ suite "WakuNode - Relay":
# start all the nodes
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.start()
- (await node3.mountRelay(@[shard])).isOkOr:
+ (await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -193,13 +199,19 @@ suite "WakuNode - Relay":
# relay handler is called
completionFut.complete(true)
- ## The following unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
- assert false, "Failed to unsubscribe from topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ ## node1 and node2 explicitly subscribe to the same shard as node3
+ node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -287,11 +299,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -313,7 +325,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -345,11 +357,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -371,7 +383,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -403,11 +415,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
#delete websocket peer address
@@ -433,7 +445,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -467,11 +479,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -493,7 +505,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -535,11 +547,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)
await node1.start()
- (await node1.mountRelay(@[shard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node2.start()
- (await node2.mountRelay(@[shard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@@ -561,7 +573,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
+ node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
@@ -583,10 +595,15 @@ suite "WakuNode - Relay":
await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
# subscribe all nodes to a topic
let topic = "topic"
for node in nodes:
- discard node.wakuRelay.subscribe(topic, nil)
+ node.wakuRelay.subscribe(topic, simpleHandler)
await sleepAsync(500.millis)
# connect nodes in full mesh
@@ -661,19 +678,24 @@ suite "WakuNode - Relay":
"topic must use the same shard"
## When
- node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr:
+ node.subscribe((kind: ContentSub, topic: contentTopicA), handler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
- node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isOkOr:
+ node.subscribe((kind: ContentSub, topic: contentTopicB), handler).isOkOr:
assert false,
"The subscription call shouldn't error even though it's already subscribed to that shard"
- node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isOkOr:
+ node.subscribe((kind: ContentSub, topic: contentTopicC), handler).isOkOr:
assert false,
"The subscription call shouldn't error even though it's already subscribed to that shard"
+ ## The node should be subscribed to the shard
+ check node.wakuRelay.isSubscribed(shard)
+
## Then
node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr:
assert false, "Failed to unsubscribe to topic: " & $error
- check node.wakuRelay.isSubscribed(shard)
+
+ ## After unsubscription, the node should not be subscribed to the shard anymore
+ check not node.wakuRelay.isSubscribed(shard)
## Cleanup
await node.stop()
diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim
index 309f800dc..81e366298 100644
--- a/tests/waku_relay/utils.nim
+++ b/tests/waku_relay/utils.nim
@@ -60,7 +60,7 @@ proc subscribeToContentTopicWithHandler*(
if topic == topic:
completionFut.complete(true)
- (node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler))).isOkOr:
+ (node.subscribe((kind: ContentSub, topic: contentTopic), relayHandler)).isOkOr:
error "Failed to subscribe to content topic", error
completionFut.complete(true)
return completionFut
@@ -73,7 +73,7 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo
if topic == pubsubTopic:
completionFut.complete(true)
- (node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))).isOkOr:
+ (node.subscribe((kind: PubsubSub, topic: pubsubTopic), relayHandler)).isOkOr:
error "Failed to subscribe to pubsub topic", error
completionFut.complete(false)
return completionFut
diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
index 312fe5cfc..8b5a47174 100644
--- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim
+++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim
@@ -57,7 +57,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
@@ -74,7 +74,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
@@ -90,7 +90,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
- (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
@@ -117,13 +117,18 @@ procSuite "WakuNode - RLN relay":
if topic == DefaultPubsubTopic:
completionFut.complete(true)
- ## The following unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
- assert false, "Failed to unsubscribe from topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node1: " & $error
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node2: " & $error
## Subscribe to the relay topic to add the custom relay handler defined above
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
@@ -146,8 +151,7 @@ procSuite "WakuNode - RLN relay":
discard await node1.publish(some(DefaultPubsubTopic), message)
await sleepAsync(2000.millis)
- check:
- (await completionFut.withTimeout(10.seconds)) == true
+ assert (await completionFut.withTimeout(10.seconds)), "completionFut timed out"
await node1.stop()
await node2.stop()
@@ -169,7 +173,7 @@ procSuite "WakuNode - RLN relay":
]
# set up three nodes
- await allFutures(nodes.mapIt(it.mountRelay(shards)))
+ await allFutures(nodes.mapIt(it.mountRelay()))
# mount rlnrelay in off-chain mode
for index, node in nodes:
@@ -201,17 +205,20 @@ procSuite "WakuNode - RLN relay":
elif topic == $shards[1]:
rxMessagesTopic2 = rxMessagesTopic2 + 1
- ## This unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[0])).isOkOr:
- assert false, "Failed to unsubscribe to pubsub topic: " & $error
- nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[1])).isOkOr:
- assert false, "Failed to unsubscribe to pubsub topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ nodes[0].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in nodes[0]: " & $error
+ nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in nodes[1]: " & $error
# mount the relay handlers
- nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler)).isOkOr:
+ nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
- nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler)).isOkOr:
+ nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(1000.millis)
@@ -279,7 +286,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
@@ -296,7 +303,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
@@ -312,7 +319,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
- (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
@@ -339,13 +346,18 @@ procSuite "WakuNode - RLN relay":
if topic == DefaultPubsubTopic:
completionFut.complete(true)
- ## The following unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
- assert false, "Failed to unsubscribe to pubsub topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node1: " & $error
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node2: " & $error
# mount the relay handler
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
@@ -408,7 +420,7 @@ procSuite "WakuNode - RLN relay":
# set up three nodes
# node1
- (await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
@@ -425,7 +437,7 @@ procSuite "WakuNode - RLN relay":
await node1.start()
# node 2
- (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
@@ -441,7 +453,7 @@ procSuite "WakuNode - RLN relay":
await node2.start()
# node 3
- (await node3.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
@@ -513,13 +525,18 @@ procSuite "WakuNode - RLN relay":
if msg.payload == wm4.payload:
completionFut4.complete(true)
- ## The following unsubscription is necessary to remove the default relay handler, which is
- ## added when mountRelay is called.
- node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr:
- assert false, "Failed to unsubscribe to pubsub topic: " & $error
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node1: " & $error
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node2: " & $error
# mount the relay handler for node3
- node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
await sleepAsync(2000.millis)
@@ -562,14 +579,14 @@ procSuite "WakuNode - RLN relay":
epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4
# Given both nodes mount relay and rlnrelay
- (await node1.mountRelay(shardSeq)).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
(await node1.mountRlnRelay(wakuRlnConfig1)).isOkOr:
assert false, "Failed to mount rlnrelay"
# Mount rlnrelay in node2 in off-chain mode
- (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
await node2.mountRlnRelay(wakuRlnConfig2)
@@ -613,7 +630,7 @@ procSuite "WakuNode - RLN relay":
if msg == wm6:
completionFut6.complete(true)
- node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
# Given all messages have an rln proof and are published by the node 1
@@ -704,17 +721,27 @@ procSuite "WakuNode - RLN relay":
# Given both nodes mount relay and rlnrelay
# Mount rlnrelay in node1 in off-chain mode
- (await node1.mountRelay(shardSeq)).isOkOr:
+ (await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10")
await node1.mountRlnRelay(wakuRlnConfig1)
# Mount rlnrelay in node2 in off-chain mode
- (await node2.mountRelay(@[DefaultRelayShard])).isOkOr:
+ (await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11")
await node2.mountRlnRelay(wakuRlnConfig2)
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node2: " & $error
+ node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic in node1: " & $error
+
# Given the two nodes are started and connected
waitFor allFutures(node1.start(), node2.start())
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
diff --git a/tests/wakunode2/test_validators.nim b/tests/wakunode2/test_validators.nim
index 44b6ae118..b0a8dd8fb 100644
--- a/tests/wakunode2/test_validators.nim
+++ b/tests/wakunode2/test_validators.nim
@@ -73,7 +73,9 @@ suite "WakuNode2 - Validators":
# Subscribe all nodes to the same topic/handler
for node in nodes:
- discard node.wakuRelay.subscribe($spamProtectedShard, handler)
+ node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+
await sleepAsync(500.millis)
# Each node publishes 10 signed messages
@@ -163,7 +165,9 @@ suite "WakuNode2 - Validators":
# Subscribe all nodes to the same topic/handler
for node in nodes:
- discard node.wakuRelay.subscribe($spamProtectedShard, handler)
+ node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+
await sleepAsync(500.millis)
# Each node sends 5 messages, signed but with a non-whitelisted key (total = 25)
@@ -291,7 +295,8 @@ suite "WakuNode2 - Validators":
# Subscribe all nodes to the same topic/handler
for node in nodes:
- discard node.wakuRelay.subscribe($spamProtectedShard, handler)
+ node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)
# Add signed message validator to all nodes. They will only route signed messages
diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim
index a3546f1f8..4e59b0725 100644
--- a/tests/wakunode_rest/test_rest_admin.nim
+++ b/tests/wakunode_rest/test_rest_admin.nim
@@ -43,14 +43,27 @@ suite "Waku v2 Rest API - Admin":
node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604))
await allFutures(node1.start(), node2.start(), node3.start())
- let shards = @[RelayShard(clusterId: 1, shardId: 0)]
await allFutures(
- node1.mountRelay(shards = shards),
- node2.mountRelay(shards = shards),
- node3.mountRelay(shards = shards),
+ node1.mountRelay(),
+ node2.mountRelay(),
+ node3.mountRelay(),
node3.mountPeerExchange(),
)
+ # The three nodes should be subscribed to the same shard
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ let shard = RelayShard(clusterId: 1, shardId: 0)
+ node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+ node3.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to topic: " & $error
+
peerInfo1 = node1.switch.peerInfo
peerInfo2 = node2.switch.peerInfo
peerInfo3 = node3.switch.peerInfo
diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim
index dcd430a0e..f8dbf429a 100644
--- a/tests/wakunode_rest/test_rest_filter.nim
+++ b/tests/wakunode_rest/test_rest_filter.nim
@@ -278,8 +278,16 @@ suite "Waku v2 Rest API - Filter V2":
restFilterTest = await RestFilterTest.init()
subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic)
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+
+ restFilterTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
+ ).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
# When
@@ -325,7 +333,14 @@ suite "Waku v2 Rest API - Filter V2":
# setup filter service and client node
let restFilterTest = await RestFilterTest.init()
let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ restFilterTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
+ ).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
let requestBody = FilterSubscribeRequest(
@@ -397,7 +412,14 @@ suite "Waku v2 Rest API - Filter V2":
# setup filter service and client node
let restFilterTest = await RestFilterTest.init()
let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId
- restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ restFilterTest.serviceNode.subscribe(
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
+ ).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
let requestBody = FilterSubscribeRequest(
diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim
index 035b2a884..b09c72ee3 100644
--- a/tests/wakunode_rest/test_rest_lightpush.nim
+++ b/tests/wakunode_rest/test_rest_lightpush.nim
@@ -128,13 +128,18 @@ suite "Waku v2 Rest API - lightpush":
# Given
let restLightPushTest = await RestLightPushTest.init()
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
restLightPushTest.consumerNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to relay: " & $error
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to relay: " & $error
require:
@@ -162,9 +167,13 @@ suite "Waku v2 Rest API - lightpush":
asyncTest "Push message bad-request":
# Given
let restLightPushTest = await RestLightPushTest.init()
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to relay: " & $error
require:
@@ -220,14 +229,18 @@ suite "Waku v2 Rest API - lightpush":
let budgetCap = 3
let tokenPeriod = 500.millis
let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod))
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
restLightPushTest.consumerNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to relay: " & $error
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to relay: " & $error
require:
diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
index f50703bae..fea51554b 100644
--- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim
+++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim
@@ -122,14 +122,18 @@ suite "Waku v2 Rest API - lightpush":
asyncTest "Push message request":
# Given
let restLightPushTest = await RestLightPushTest.init()
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
restLightPushTest.consumerNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to topic"
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to topic"
require:
@@ -157,9 +161,13 @@ suite "Waku v2 Rest API - lightpush":
asyncTest "Push message bad-request":
# Given
let restLightPushTest = await RestLightPushTest.init()
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to topic"
require:
@@ -218,14 +226,18 @@ suite "Waku v2 Rest API - lightpush":
let budgetCap = 3
let tokenPeriod = 500.millis
let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod))
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
restLightPushTest.consumerNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to topic"
restLightPushTest.serviceNode.subscribe(
- (kind: PubsubSub, topic: DefaultPubsubTopic)
+ (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler
).isOkOr:
assert false, "Failed to subscribe to topic"
require:
diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim
index 208b86190..8ea7f2abe 100644
--- a/tests/wakunode_rest/test_rest_relay.nim
+++ b/tests/wakunode_rest/test_rest_relay.nim
@@ -95,9 +95,18 @@ suite "Waku v2 Rest API - Relay":
shard3 = RelayShard(clusterId: DefaultClusterId, shardId: 3)
shard4 = RelayShard(clusterId: DefaultClusterId, shardId: 4)
- (await node.mountRelay(@[shard0, shard1, shard2, shard3, shard4])).isOkOr:
+ (await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ proc simpleHandler(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ for shard in @[$shard0, $shard1, $shard2, $shard3, $shard4]:
+ node.subscribe((kind: PubsubSub, topic: shard), simpleHandler).isOkOr:
+ assert false, "Failed to subscribe to pubsub topic: " & $error
+
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet()
@@ -248,8 +257,14 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic"
+
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -477,7 +492,12 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: ContentSub, topic: DefaultContentTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node.subscribe((kind: ContentSub, topic: DefaultContentTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to content topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -583,7 +603,12 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
@@ -640,7 +665,12 @@ suite "Waku v2 Rest API - Relay":
let client = newRestHttpClient(initTAddress(restAddress, restPort))
- node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr:
+ let simpleHandler = proc(
+ topic: PubsubTopic, msg: WakuMessage
+ ): Future[void] {.async, gcsafe.} =
+ await sleepAsync(0.milliseconds)
+
+ node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr:
assert false, "Failed to subscribe to pubsub topic: " & $error
require:
toSeq(node.wakuRelay.subscribedTopics).len == 1
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 7df5c2567..a03e2a1e1 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -317,10 +317,7 @@ proc setupProtocols(
(
await mountRelay(
- node,
- shards,
- peerExchangeHandler = peerExchangeHandler,
- int(conf.maxMessageSizeBytes),
+ node, peerExchangeHandler = peerExchangeHandler, int(conf.maxMessageSizeBytes)
)
).isOkOr:
return err("failed to mount waku relay protocol: " & $error)
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index 01dc7a36f..fe797b0a3 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -1,7 +1,7 @@
{.push raises: [].}
import
- std/[options, sequtils],
+ std/[options, sequtils, strformat],
results,
chronicles,
chronos,
@@ -130,8 +130,12 @@ proc setupAppCallbacks(
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
let shards = confShards & autoShards
- for shard in shards:
- discard node.wakuRelay.subscribe($shard, appCallbacks.relayHandler)
+ let uniqueShards = deduplicate(shards)
+
+ for shard in uniqueShards:
+ let topic = $shard
+ node.subscribe((kind: PubsubSub, topic: topic), appCallbacks.relayHandler).isOkOr:
+ return err(fmt"Could not subscribe {topic}: " & $error)
if not appCallbacks.topicHealthChangeHandler.isNil():
if node.wakuRelay.isNil():
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 1538d9096..ac72f3e37 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -116,7 +116,6 @@ type
announcedAddresses*: seq[MultiAddress]
started*: bool # Indicates that node has started listening
topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent]
- contentTopicHandlers: Table[ContentTopic, TopicHandler]
rateLimitSettings*: ProtocolRateLimitSettings
proc new*(
@@ -256,7 +255,13 @@ proc mountStoreSync*(
## Waku relay
-proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) =
+proc registerRelayHandler(
+ node: WakuNode, topic: PubsubTopic, appHandler: WakuRelayHandler
+) =
+ ## Registers the single handler for the given topic.
+ ## Note that this handler internally calls other handlers, such as the filter
+ ## and archive handlers, plus the handler provided by the application.
+
if node.wakuRelay.isSubscribed(topic):
return
@@ -289,18 +294,19 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) =
node.wakuStoreReconciliation.messageIngress(topic, msg)
- let defaultHandler = proc(
+ let uniqueTopicHandler = proc(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
await traceHandler(topic, msg)
await filterHandler(topic, msg)
await archiveHandler(topic, msg)
await syncHandler(topic, msg)
+ await appHandler(topic, msg)
- discard node.wakuRelay.subscribe(topic, defaultHandler)
+ node.wakuRelay.subscribe(topic, uniqueTopicHandler)
proc subscribe*(
- node: WakuNode, subscription: SubscriptionEvent, handler = none(WakuRelayHandler)
+ node: WakuNode, subscription: SubscriptionEvent, handler: WakuRelayHandler
): Result[void, string] =
## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on
## this topic. WakuRelayHandler is a method that takes a topic and a Waku message.
@@ -326,18 +332,8 @@ proc subscribe*(
warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic
return ok()
- if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()):
- warn "No-effect API call to `subscribe`. Was already subscribed"
- return ok()
-
+ node.registerRelayHandler(pubsubTopic, handler)
node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic))
- node.registerRelayDefaultHandler(pubsubTopic)
-
- if handler.isSome():
- let wrappedHandler = node.wakuRelay.subscribe(pubsubTopic, handler.get())
-
- if contentTopicOp.isSome():
- node.contentTopicHandlers[contentTopicOp.get()] = wrappedHandler
return ok()
@@ -367,17 +363,9 @@ proc unsubscribe*(
warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic
return ok()
- if contentTopicOp.isSome():
- # Remove this handler only
- var handler: TopicHandler
- ## TODO: refactor this part. I think we can simplify it
- if node.contentTopicHandlers.pop(contentTopicOp.get(), handler):
- debug "unsubscribe", contentTopic = contentTopicOp.get()
- node.wakuRelay.unsubscribe(pubsubTopic)
- else:
- debug "unsubscribe", pubsubTopic = pubsubTopic
- node.wakuRelay.unsubscribe(pubsubTopic)
- node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic))
+ debug "unsubscribe", pubsubTopic, contentTopicOp
+ node.wakuRelay.unsubscribe(pubsubTopic)
+ node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic))
return ok()
@@ -439,7 +427,6 @@ proc startRelay*(node: WakuNode) {.async.} =
proc mountRelay*(
node: WakuNode,
- shards: seq[RelayShard] = @[],
peerExchangeHandler = none(RoutingRecordsHandler),
maxMessageSize = int(DefaultMaxWakuMessageSize),
): Future[Result[void, string]] {.async.} =
@@ -465,16 +452,7 @@ proc mountRelay*(
node.switch.mount(node.wakuRelay, protocolMatcher(WakuRelayCodec))
- ## Make sure we don't have duplicates
- let uniqueShards = deduplicate(shards)
-
- # Subscribe to shards
- for shard in uniqueShards:
- node.subscribe((kind: PubsubSub, topic: $shard)).isOkOr:
- error "failed to subscribe to shard", error = error
- return err("failed to subscribe to shard in mountRelay: " & error)
-
- info "relay mounted successfully", shards = uniqueShards
+ info "relay mounted successfully"
return ok()
## Waku filter
diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim
index 6e880f5a3..f11a11fbc 100644
--- a/waku/waku_api/rest/builder.nim
+++ b/waku/waku_api/rest/builder.nim
@@ -148,9 +148,9 @@ proc startRestServerProtocolSupport*(
let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard)
cache.pubsubSubscribe(pubsubTopic)
- ## TODO: remove this line. use observer-observable pattern
- ## within waku_node::registerRelayDefaultHandler
- discard node.wakuRelay.subscribe(pubsubTopic, handler)
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
+ error "Could not subscribe", pubsubTopic, error
+ continue
for contentTopic in contentTopics:
cache.contentSubscribe(contentTopic)
@@ -160,9 +160,9 @@ proc startRestServerProtocolSupport*(
continue
let pubsubTopic = $shard
- ## TODO: remove this line. use observer-observable pattern
- ## within waku_node::registerRelayDefaultHandler
- discard node.wakuRelay.subscribe(pubsubTopic, handler)
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
+ error "Could not subscribe", pubsubTopic, error
+ continue
installRelayApiHandlers(router, node, cache)
else:
diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim
index 252375208..06bbc0c06 100644
--- a/waku/waku_api/rest/relay/handlers.nim
+++ b/waku/waku_api/rest/relay/handlers.nim
@@ -67,9 +67,7 @@ proc installRelayApiHandlers*(
for pubsubTopic in newTopics:
cache.pubsubSubscribe(pubsubTopic)
- node.subscribe(
- (kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache))
- ).isOkOr:
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), messageCacheHandler(cache)).isOkOr:
let errorMsg = "Subscribe failed:" & $error
error "SUBSCRIBE failed", error = errorMsg
return RestApiResponse.internalServerError(errorMsg)
@@ -202,7 +200,7 @@ proc installRelayApiHandlers*(
cache.contentSubscribe(contentTopic)
node.subscribe(
- (kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache))
+ (kind: ContentSub, topic: contentTopic), messageCacheHandler(cache)
).isOkOr:
let errorMsg = "Subscribe failed:" & $error
error "SUBSCRIBE failed", error = errorMsg
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index 8da3f89b5..c87519b06 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -131,6 +131,8 @@ type
# a map of validators to error messages to return when validation fails
topicValidator: Table[PubsubTopic, ValidatorHandler]
# map topic with its assigned validator within pubsub
+ topicHandlers: Table[PubsubTopic, TopicHandler]
+ # map topic with the TopicHandler proc in charge of handling the topic's incoming messages
publishObservers: seq[PublishObserver]
topicsHealth*: Table[string, TopicHealth]
onTopicHealthChange*: TopicHealthChangeHandler
@@ -488,13 +490,11 @@ proc validateMessage*(
return ok()
-proc subscribe*(
- w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler
-): TopicHandler =
+proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler) =
debug "subscribe", pubsubTopic = pubsubTopic
# We need to wrap the handler since gossipsub doesnt understand WakuMessage
- let wrappedHandler = proc(
+ let topicHandler = proc(
pubsubTopic: string, data: seq[byte]
): Future[void] {.gcsafe, raises: [].} =
let decMsg = WakuMessage.decode(data)
@@ -526,9 +526,9 @@ proc subscribe*(
w.topicParams[pubsubTopic] = TopicParameters
# subscribe to the topic with our wrapped handler
- procCall GossipSub(w).subscribe(pubsubTopic, wrappedHandler)
+ procCall GossipSub(w).subscribe(pubsubTopic, topicHandler)
- return wrappedHandler
+ w.topicHandlers[pubsubTopic] = topicHandler
proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
## Unsubscribe all handlers on this pubsub topic
@@ -537,35 +537,32 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) =
procCall GossipSub(w).unsubscribeAll(pubsubTopic)
w.topicValidator.del(pubsubTopic)
+ w.topicHandlers.del(pubsubTopic)
proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) =
if not w.topicValidator.hasKey(pubsubTopic):
error "unsubscribe no validator for this topic", pubsubTopic
return
- if pubsubtopic notin Pubsub(w).topics:
+ if not w.topicHandlers.hasKey(pubsubTopic):
error "not subscribed to the given topic", pubsubTopic
return
- var topicHandlerSeq: seq[TopicHandler]
+ var topicHandler: TopicHandler
var topicValidator: ValidatorHandler
try:
- topicHandlerSeq = Pubsub(w).topics[pubsubTopic]
- if topicHandlerSeq.len == 0:
- error "unsubscribe no handler for this topic", pubsubTopic
- return
+ topicHandler = w.topicHandlers[pubsubTopic]
topicValidator = w.topicValidator[pubsubTopic]
except KeyError:
error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg()
return
- let topicHandler = topicHandlerSeq[0]
-
debug "unsubscribe", pubsubTopic
- procCall GossipSub(w).unsubscribe($pubsubTopic, topicHandler)
- ## TODO: uncomment the following line when https://github.com/vacp2p/nim-libp2p/pull/1356
- ## is available in a nim-libp2p release.
- # procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator)
+ procCall GossipSub(w).unsubscribe(pubsubTopic, topicHandler)
+ procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator)
+
+ w.topicValidator.del(pubsubTopic)
+ w.topicHandlers.del(pubsubTopic)
proc publish*(
w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage
From 66d8d3763d70de3bb71ae1379a0282ef6947ebb1 Mon Sep 17 00:00:00 2001
From: Hanno Cornelius <68783915+jm-clius@users.noreply.github.com>
Date: Wed, 4 Jun 2025 14:19:14 +0100
Subject: [PATCH 11/47] fix: misc sync fixes, added debug logging (#3411)
---
waku/waku_store_sync/reconciliation.nim | 20 +++++++++++++++-----
waku/waku_store_sync/transfer.nim | 13 ++++++++++++-
2 files changed, 27 insertions(+), 6 deletions(-)
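Note on the initFillStorage change below: in Nim, `newSeq[T](n)` allocates n default-initialized elements, whereas `newSeqOfCap[T](n)` only reserves capacity, so subsequent `add` calls no longer append the real IDs after a block of empty placeholders. A minimal sketch of the difference, using a simplified stand-in for SyncID (the real type carries more fields):

  # newseq_vs_newseqofcap.nim -- illustrative only
  type SyncID = object
    hash: int

  const DefaultStorageCap = 4

  var a = newSeq[SyncID](DefaultStorageCap)      # len == 4, default-initialized entries
  var b = newSeqOfCap[SyncID](DefaultStorageCap) # len == 0, capacity merely reserved

  a.add(SyncID(hash: 1)) # a.len == 5: one real ID after four empty placeholders
  b.add(SyncID(hash: 1)) # b.len == 1: only the real ID, which is what the fix intends

  echo a.len, " ", b.len # prints "5 1"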
diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim
index cb5c1bc3d..01000935b 100644
--- a/waku/waku_store_sync/reconciliation.nim
+++ b/waku/waku_store_sync/reconciliation.nim
@@ -65,6 +65,8 @@ type SyncReconciliation* = ref object of LPProtocol
proc messageIngress*(
self: SyncReconciliation, pubsubTopic: PubsubTopic, msg: WakuMessage
) =
+ trace "message ingress", pubsub_topic = pubsubTopic, msg = msg
+
if msg.ephemeral:
return
@@ -78,6 +80,8 @@ proc messageIngress*(
proc messageIngress*(
self: SyncReconciliation, msgHash: WakuMessageHash, msg: WakuMessage
) =
+ trace "message ingress", msg_hash = msgHash.toHex(), msg = msg
+
if msg.ephemeral:
return
@@ -87,6 +91,8 @@ proc messageIngress*(
error "failed to insert new message", msg_hash = msgHash.toHex(), err = error
proc messageIngress*(self: SyncReconciliation, id: SyncID) =
+ trace "message ingress", id = id
+
self.storage.insert(id).isOkOr:
error "failed to insert new message", msg_hash = id.hash.toHex(), err = error
@@ -116,7 +122,7 @@ proc processRequest(
roundTrips.inc()
- trace "sync payload received",
+ debug "sync payload received",
local = self.peerManager.switch.peerInfo.peerId,
remote = conn.peerId,
payload = recvPayload
@@ -135,6 +141,9 @@ proc processRequest(
recvPayload.shards.toPackedSet() == self.shards:
sendPayload = self.storage.processPayload(recvPayload, hashToSend, hashToRecv)
+ debug "sync payload processed",
+ hash_to_send = hashToSend, hash_to_recv = hashToRecv
+
sendPayload.cluster = self.cluster
sendPayload.shards = self.shards.toSeq()
@@ -157,7 +166,7 @@ proc processRequest(
return
err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
- trace "sync payload sent",
+ debug "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
remote = conn.peerId,
payload = sendPayload
@@ -208,7 +217,7 @@ proc initiate(
"remote " & $connection.peerId & " connection write error: " & writeRes.error.msg
)
- trace "sync payload sent",
+ debug "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
remote = connection.peerId,
payload = initPayload
@@ -265,7 +274,7 @@ proc initFillStorage(
debug "initial storage filling started"
- var ids = newSeq[SyncID](DefaultStorageCap)
+ var ids = newSeqOfCap[SyncID](DefaultStorageCap)
# we assume IDs are in order
@@ -332,7 +341,8 @@ proc new*(
sync.handler = handler
sync.codec = WakuReconciliationCodec
- info "Store Reconciliation protocol initialized"
+ info "Store Reconciliation protocol initialized",
+ sync_range = syncRange, sync_interval = syncInterval, relay_jitter = relayJitter
return ok(sync)
diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim
index 78b83c601..f17fe944b 100644
--- a/waku/waku_store_sync/transfer.nim
+++ b/waku/waku_store_sync/transfer.nim
@@ -97,7 +97,13 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
while true: # infinite loop
let (peerId, fingerprint) = await self.remoteNeedsRx.popFirst()
- if not self.outSessions.hasKey(peerId):
+ if (not self.outSessions.hasKey(peerId)) or self.outSessions[peerId].closed() or
+ ## sanity check, should not be possible
+ self.outSessions[peerId].isClosedRemotely:
+ ## quite possibly the remote end has closed the connection, believing the transfer to be done
+ debug "opening transfer connection to remote peer",
+ my_peer_id = self.peerManager.switch.peerInfo.peerId, remote_peer_id = peerId
+
let connection = (await self.openConnection(peerId)).valueOr:
error "failed to establish transfer connection", error = error
continue
@@ -121,6 +127,11 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
let msg =
WakuMessageAndTopic(pubsub: response.topics[0], message: response.messages[0])
+ trace "sending transfer message",
+ my_peer_id = self.peerManager.switch.peerInfo.peerId,
+ remote_peer_id = peerId,
+ msg = msg
+
(await sendMessage(connection, msg)).isOkOr:
self.outSessions.del(peerId)
await connection.close()
From daa4a6a98673cae034c3309d9cd4e665e0ea6723 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Thu, 5 Jun 2025 17:25:14 +0200
Subject: [PATCH 12/47] feat: add waku_disconnect_all_peers to libwaku (#3438)
---
library/libwaku.h | 4 ++++
library/libwaku.nim | 14 ++++++++++++++
.../requests/peer_manager_request.nim | 11 +++++++++++
3 files changed, 29 insertions(+)
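The new DISCONNECT_ALL_PEERS branch below gathers every currently connected peer and disconnects them concurrently via chronos' allFutures. A condensed sketch of that pattern, with PeerId and disconnectNode as simplified stand-ins rather than the real peer-manager API:

  # disconnect_all_sketch.nim -- illustrative only
  import chronos

  type PeerId = string

  proc disconnectNode(peer: PeerId): Future[void] {.async.} =
    echo "disconnecting ", peer

  proc disconnectAll(peers: seq[PeerId]) {.async.} =
    # start every disconnect first, then wait for all of them to complete
    var futs: seq[Future[void]]
    for peer in peers:
      futs.add(disconnectNode(peer))
    await allFutures(futs)

  waitFor disconnectAll(@["peerA", "peerB"])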
diff --git a/library/libwaku.h b/library/libwaku.h
index 7a16c9d65..a95cbda90 100644
--- a/library/libwaku.h
+++ b/library/libwaku.h
@@ -150,6 +150,10 @@ int waku_disconnect_peer_by_id(void* ctx,
WakuCallBack callback,
void* userData);
+int waku_disconnect_all_peers(void* ctx,
+ WakuCallBack callback,
+ void* userData);
+
int waku_dial_peer(void* ctx,
const char* peerMultiAddr,
const char* protocol,
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 5d8252225..69d523d14 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -580,6 +580,20 @@ proc waku_disconnect_peer_by_id(
userData,
)
+proc waku_disconnect_all_peers(
+ ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
+
+ handleRequest(
+ ctx,
+ RequestType.PEER_MANAGER,
+ PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS),
+ callback,
+ userData,
+ )
+
proc waku_dial_peer(
ctx: ptr WakuContext,
peerMultiAddr: cstring,
diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
index e68e66afe..55728780f 100644
--- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
@@ -12,6 +12,7 @@ type PeerManagementMsgType* {.pure.} = enum
GET_CONNECTED_PEERS_INFO
GET_PEER_IDS_BY_PROTOCOL
DISCONNECT_PEER_BY_ID
+ DISCONNECT_ALL_PEERS
DIAL_PEER
DIAL_PEER_BY_ID
GET_CONNECTED_PEERS
@@ -121,6 +122,16 @@ proc process*(
return err($error)
await waku.node.peerManager.disconnectNode(peerId)
return ok("")
+ of DISCONNECT_ALL_PEERS:
+ let connectedPeers = waku.node.peerManager.switch.peerStore.peers().filterIt(
+ it.connectedness == Connected
+ )
+
+ var futs: seq[Future[void]]
+ for peer in connectedPeers:
+ futs.add(waku.node.peerManager.disconnectNode(peer))
+ await allFutures(futs)
+ return ok("")
of DIAL_PEER:
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
error "DIAL_PEER failed", error = $error
From 4f181abe0d7265c0cf883ed96b13c7c7b723b720 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Fri, 6 Jun 2025 11:38:34 +0200
Subject: [PATCH 13/47] bump nph and nitpick change (#3441)
---
.github/workflows/ci.yml | 1 -
vendor/nph | 2 +-
2 files changed, 1 insertion(+), 2 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0ede43361..41f5500a0 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -140,7 +140,6 @@ jobs:
secrets: inherit
-
js-waku-node:
needs: build-docker-image
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
diff --git a/vendor/nph b/vendor/nph
index 3191cc71f..c6e03162d 160000
--- a/vendor/nph
+++ b/vendor/nph
@@ -1 +1 @@
-Subproject commit 3191cc71f4d49473de6cf73a2680009a92419407
+Subproject commit c6e03162dc2820d3088660f644818d7040e95791
From 5132510bc69b002131cef6d18be9e25d6978d9c8 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Fri, 6 Jun 2025 15:50:08 +0200
Subject: [PATCH 14/47] fix: dnsresolver (#3440)
Properly transmit the DNS name server list parameter to the peer manager
---
.../liteprotocoltester/liteprotocoltester.nim | 1 -
waku/factory/builder.nim | 8 +++--
.../dns_discovery_conf_builder.nim | 3 --
.../conf_builder/waku_conf_builder.nim | 14 +-------
.../conf_builder/web_socket_conf_builder.nim | 2 +-
waku/factory/external_config.nim | 14 --------
waku/factory/internal_config.nim | 3 +-
waku/factory/network_conf.nim | 34 -------------------
waku/factory/networks_config.nim | 10 +++++-
waku/factory/node_factory.nim | 18 +++-------
waku/factory/waku_conf.nim | 3 +-
waku/node/net_config.nim | 2 +-
waku/node/peer_manager/peer_manager.nim | 2 +-
13 files changed, 27 insertions(+), 87 deletions(-)
delete mode 100644 waku/factory/network_conf.nim
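With the dnsAddrs flag gone, the configured name-server list is always handed to the resolver and the peer manager. The resolver construction that node_factory.nim ends up with looks roughly as follows; the libp2p/nameresolving/dnsresolver import path is an assumption based on nim-libp2p's layout, while the rest mirrors the patch:

  # dns_resolver_sketch.nim -- illustrative only
  import std/[net, sequtils]
  import chronos
  import libp2p/nameresolving/dnsresolver

  # Same defaults the patch wires into net_config.nim and peer_manager.nim
  let dnsAddrsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]

  # Assume every name server listens on the standard DNS port 53,
  # as the existing code already does
  let nameResolver =
    DnsResolver.new(dnsAddrsNameServers.mapIt(initTAddress(it, Port(53))))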
diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim
index 58f6bd2e3..991e9ba78 100644
--- a/apps/liteprotocoltester/liteprotocoltester.nim
+++ b/apps/liteprotocoltester/liteprotocoltester.nim
@@ -96,7 +96,6 @@ when isMainModule:
wakuConf.restPort = conf.restPort
wakuConf.restAllowOrigin = conf.restAllowOrigin
- wakuConf.dnsAddrs = true
wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
wakuConf.shards = @[conf.shard]
diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim
index d1cede969..18f1535ea 100644
--- a/waku/factory/builder.nim
+++ b/waku/factory/builder.nim
@@ -79,6 +79,7 @@ proc withNetworkConfigurationDetails*(
wssEnabled: bool = false,
wakuFlags = none(CapabilitiesBitfield),
dns4DomainName = none(string),
+ dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
): WakuNodeBuilderResult {.
deprecated: "use 'builder.withNetworkConfiguration()' instead"
.} =
@@ -94,6 +95,7 @@ proc withNetworkConfigurationDetails*(
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,
dns4DomainName = dns4DomainName,
+ dnsNameServers = dnsNameServers,
)
builder.withNetworkConfiguration(netConfig)
ok()
@@ -166,6 +168,10 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
if builder.netConfig.isNone():
return err("network configuration is required")
+ let netConfig = builder.netConfig.get()
+ if netConfig.dnsNameServers.len == 0:
+ return err("DNS name servers are required for WakuNode")
+
if builder.record.isNone():
return err("node record is required")
@@ -196,8 +202,6 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
except CatchableError:
return err("failed to create switch: " & getCurrentExceptionMsg())
- let netConfig = builder.netConfig.get()
-
let peerManager = PeerManager.new(
switch = switch,
storage = builder.peerStorage.get(nil),
diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
index 8ac33a18f..dbb2c5fd3 100644
--- a/waku/factory/conf_builder/dns_discovery_conf_builder.nim
+++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
@@ -21,9 +21,6 @@ proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
b.enrTreeUrl = some(enrTreeUrl)
-proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) =
- b.nameServers = concat(b.nameServers, nameServers)
-
proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
if not b.enabled.get(false):
return ok(none(DnsDiscoveryConf))
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index ab53a965f..f1f0ba471 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -107,7 +107,6 @@ type WakuConfBuilder* = object
extMultiAddrs: seq[string]
extMultiAddrsOnly: Option[bool]
- dnsAddrs: Option[bool]
dnsAddrsNameServers: seq[IpAddress]
peerPersistence: Option[bool]
@@ -193,9 +192,6 @@ proc withRemotePeerExchangeNode*(
) =
b.remotePeerExchangeNode = some(remotePeerExchangeNode)
-proc withDnsAddrs*(b: var WakuConfBuilder, dnsAddrs: bool) =
- b.dnsAddrs = some(dnsAddrs)
-
proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) =
b.peerPersistence = some(peerPersistence)
@@ -208,7 +204,7 @@ proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) =
proc withDnsAddrsNameServers*(
b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress]
) =
- b.dnsAddrsNameServers = concat(b.dnsAddrsNameServers, dnsAddrsNameServers)
+ b.dnsAddrsNameServers.insert(dnsAddrsNameServers)
proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) =
b.logLevel = some(logLevel)
@@ -541,13 +537,6 @@ proc build*(
warn "Whether to only announce external multiaddresses is not specified, defaulting to false"
false
- let dnsAddrs =
- if builder.dnsAddrs.isSome():
- builder.dnsAddrs.get()
- else:
- warn "Whether to resolve DNS multiaddresses was not specified, defaulting to false."
- false
-
let dnsAddrsNameServers =
if builder.dnsAddrsNameServers.len != 0:
builder.dnsAddrsNameServers
@@ -627,7 +616,6 @@ proc build*(
),
portsShift: portsShift,
webSocketConf: webSocketConf,
- dnsAddrs: dnsAddrs,
dnsAddrsNameServers: dnsAddrsNameServers,
peerPersistence: peerPersistence,
peerStoreCapacity: builder.peerStoreCapacity,
diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim
index 25ff6461d..5ed3d230a 100644
--- a/waku/factory/conf_builder/web_socket_conf_builder.nim
+++ b/waku/factory/conf_builder/web_socket_conf_builder.nim
@@ -1,5 +1,5 @@
import chronicles, std/[net, options], results
-import ../network_conf
+import ../networks_config
logScope:
topics = "waku conf builder websocket"
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 2b156fc8e..9617d4403 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -245,12 +245,6 @@ type WakuNodeConf* = object
.}: bool
## DNS addrs config
- dnsAddrs* {.
- desc: "Enable resolution of `dnsaddr`, `dns4` or `dns6` multiaddrs",
- defaultValue: true,
- name: "dns-addrs"
- .}: bool
-
dnsAddrsNameServers* {.
desc:
"DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.",
@@ -567,12 +561,6 @@ with the drawback of consuming some more bandwidth.""",
name: "dns-discovery-url"
.}: string
- dnsDiscoveryNameServers* {.
- desc: "DNS name server IPs to query. Argument may be repeated.",
- defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
- name: "dns-discovery-name-server"
- .}: seq[IpAddress]
-
## Discovery v5 config
discv5Discovery* {.
desc: "Enable discovering nodes via Node Discovery v5.",
@@ -962,7 +950,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withPeerStoreCapacity(n.peerStoreCapacity.get())
b.withPeerPersistence(n.peerPersistence)
- b.withDnsAddrs(n.dnsAddrs)
b.withDnsAddrsNameServers(n.dnsAddrsNameServers)
b.withDns4DomainName(n.dns4DomainName)
b.withCircuitRelayClient(n.isRelayClient)
@@ -1024,7 +1011,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
- b.dnsDiscoveryConf.withNameServers(n.dnsDiscoveryNameServers)
if n.discv5Discovery.isSome():
b.discv5Conf.withEnabled(n.discv5Discovery.get())
diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim
index 72af28340..4f252fd00 100644
--- a/waku/factory/internal_config.nim
+++ b/waku/factory/internal_config.nim
@@ -12,7 +12,7 @@ import
../waku_enr,
../waku_core,
./waku_conf,
- ./network_conf
+ ./networks_config
proc enrConfiguration*(
conf: WakuConf, netConfig: NetConfig
@@ -139,6 +139,7 @@ proc networkConfiguration*(
dns4DomainName = conf.dns4DomainName,
discv5UdpPort = discv5UdpPort,
wakuFlags = some(wakuFlags),
+ dnsNameServers = dnsAddrsNameServers,
)
return netConfigRes
diff --git a/waku/factory/network_conf.nim b/waku/factory/network_conf.nim
deleted file mode 100644
index c5179e53a..000000000
--- a/waku/factory/network_conf.nim
+++ /dev/null
@@ -1,34 +0,0 @@
-import std/[net, options, strutils]
-import libp2p/multiaddress
-
-type WebSocketSecureConf* {.requiresInit.} = object
- keyPath*: string
- certPath*: string
-
-type WebSocketConf* = object
- port*: Port
- secureConf*: Option[WebSocketSecureConf]
-
-type NetworkConf* = object
- natStrategy*: string # TODO: make enum
- p2pTcpPort*: Port
- dns4DomainName*: Option[string]
- p2pListenAddress*: IpAddress
- extMultiAddrs*: seq[MultiAddress]
- extMultiAddrsOnly*: bool
- webSocketConf*: Option[WebSocketConf]
-
-proc validateNoEmptyStrings(networkConf: NetworkConf): Result[void, string] =
- if networkConf.dns4DomainName.isSome() and
- isEmptyOrWhiteSpace(networkConf.dns4DomainName.get().string):
- return err("dns4DomainName is an empty string, set it to none(string) instead")
-
- if networkConf.webSocketConf.isSome() and
- networkConf.webSocketConf.get().secureConf.isSome():
- let secureConf = networkConf.webSocketConf.get().secureConf.get()
- if isEmptyOrWhiteSpace(secureConf.keyPath):
- return err("websocket.secureConf.keyPath is an empty string")
- if isEmptyOrWhiteSpace(secureConf.certPath):
- return err("websocket.secureConf.certPath is an empty string")
-
- return ok()
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index 8b5f4a628..619a1a7c5 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -1,6 +1,14 @@
{.push raises: [].}
-import stint
+import stint, std/[nativesockets, options]
+
+type WebSocketSecureConf* {.requiresInit.} = object
+ keyPath*: string
+ certPath*: string
+
+type WebSocketConf* = object
+ port*: Port
+ secureConf*: Option[WebSocketSecureConf]
# TODO: Rename this type to match file name
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index a03e2a1e1..5dc549317 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -67,17 +67,6 @@ proc initNode(
## file. Optionally include persistent peer storage.
## No protocols are mounted yet.
- var dnsResolver: DnsResolver
- if conf.dnsAddrs:
- # Support for DNS multiaddrs
- var nameServers: seq[TransportAddress]
- for ip in conf.dnsAddrsNameServers:
- nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
-
- dnsResolver = DnsResolver.new(nameServers)
-
- var node: WakuNode
-
let pStorage =
if peerStore.isNone():
nil
@@ -91,6 +80,9 @@ proc initNode(
else:
(none(string), none(string))
+ let nameResolver =
+ DnsResolver.new(conf.dnsAddrsNameServers.mapIt(initTAddress(it, Port(53))))
+
# Build waku node instance
var builder = WakuNodeBuilder.init()
builder.withRng(rng)
@@ -102,7 +94,7 @@ proc initNode(
maxConnections = some(conf.maxConnections.int),
secureKey = secureKey,
secureCert = secureCert,
- nameResolver = dnsResolver,
+ nameResolver = nameResolver,
sendSignedPeerRecord = conf.relayPeerExchange,
# We send our own signed peer record when peer exchange enabled
agentString = some(conf.agentString),
@@ -132,7 +124,7 @@ proc initNode(
builder.withRateLimit(conf.rateLimits)
builder.withCircuitRelay(relay)
- node =
+ let node =
?builder.build().mapErr(
proc(err: string): string =
"failed to create waku node instance: " & err
diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim
index 94b89a26e..caf925ff2 100644
--- a/waku/factory/waku_conf.nim
+++ b/waku/factory/waku_conf.nim
@@ -13,7 +13,7 @@ import
../node/waku_metrics,
../common/logging,
../waku_enr/capabilities,
- ./network_conf
+ ./networks_config
export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf
@@ -95,7 +95,6 @@ type WakuConf* {.requiresInit.} = ref object
webSocketConf*: Option[WebSocketConf]
portsShift*: uint16
- dnsAddrs*: bool
dnsAddrsNameServers*: seq[IpAddress]
networkConf*: NetworkConfig
wakuFlags*: CapabilitiesBitfield
diff --git a/waku/node/net_config.nim b/waku/node/net_config.nim
index a45d95f92..4802694c4 100644
--- a/waku/node/net_config.nim
+++ b/waku/node/net_config.nim
@@ -78,7 +78,7 @@ proc init*(
discv5UdpPort = none(Port),
clusterId: uint16 = 0,
wakuFlags = none(CapabilitiesBitfield),
- dnsNameServers = newSeq[IpAddress](),
+ dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
): NetConfigResult =
## Initialize and validate waku node network configuration
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 40a13b601..707738e5f 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -1048,7 +1048,7 @@ proc new*(
maxFailedAttempts = MaxFailedAttempts,
colocationLimit = DefaultColocationLimit,
shardedPeerManagement = false,
- dnsNameServers = newSeq[IpAddress](),
+ dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
): PeerManager {.gcsafe.} =
let capacity = switch.peerStore.capacity
let maxConnections = switch.connManager.inSema.size
From 895e2022656e2463a53e2fe231239fd74b997967 Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Tue, 10 Jun 2025 02:10:06 +0200
Subject: [PATCH 15/47] chore: Added extra debug helper via getting peer
statistics (#3443)
* Added an extra debug helper exposing peer statistics on the new /admin/v1/peers/stats endpoint
* Added the /admin/v1/peers/stats client part
* Addressed review comments: changed protocol names to their codec strings
* Fixed formatting
---
waku/waku_api/rest/admin/client.nim | 4 +
waku/waku_api/rest/admin/handlers.nim | 135 ++++++++++++++++++++------
waku/waku_api/rest/admin/types.nim | 35 +++++++
3 files changed, 147 insertions(+), 27 deletions(-)
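A sketch of how the new endpoint can be exercised through the generated REST client added below; it assumes a running node with the admin REST API enabled at restAddress:restPort and an async context, exactly like the existing tests in tests/wakunode_rest/test_rest_admin.nim:

  # peers_stats_sketch.nim fragment -- illustrative only
  let client = newRestHttpClient(initTAddress(restAddress, restPort))
  let response = await client.getPeersStats()
  assert response.status == 200
  # PeerStats is an OrderedTable[string, OrderedTable[string, int]]
  echo response.data["Sum"]["Total peers"]
  echo response.data["By Connectedness"]["Connected"]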
diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim
index 7d45544e2..87d46dd3d 100644
--- a/waku/waku_api/rest/admin/client.nim
+++ b/waku/waku_api/rest/admin/client.nim
@@ -62,6 +62,10 @@ proc getMeshPeersByShard*(
rest, endpoint: "/admin/v1/peers/mesh/on/{shardId}", meth: HttpMethod.MethodGet
.}
+proc getPeersStats*(): RestResponse[PeerStats] {.
+ rest, endpoint: "/admin/v1/peers/stats", meth: HttpMethod.MethodGet
+.}
+
proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {.
rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet
.}
diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim
index 9cf6ec131..ba401de25 100644
--- a/waku/waku_api/rest/admin/handlers.nim
+++ b/waku/waku_api/rest/admin/handlers.nim
@@ -31,6 +31,8 @@ export types
logScope:
topics = "waku node rest admin api"
+const ROUTE_ADMIN_V1_PEERS_STATS* = "/admin/v1/peers/stats" # provides peer statistics
+
const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers
const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}"
@@ -94,6 +96,40 @@ proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPe
return peers
+proc getRelayPeers(node: WakuNode): PeersOfShards =
+ var relayPeers: PeersOfShards = @[]
+ if not node.wakuRelay.isNil():
+ for topic in node.wakuRelay.getSubscribedTopics():
+ let relayShard = RelayShard.parse(topic).valueOr:
+ error "Invalid subscribed topic", error = error, topic = topic
+ continue
+ let pubsubPeers =
+ node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
+ relayPeers.add(
+ PeersOfShard(
+ shard: relayShard.shardId,
+ peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)),
+ )
+ )
+ return relayPeers
+
+proc getMeshPeers(node: WakuNode): PeersOfShards =
+ var meshPeers: PeersOfShards = @[]
+ if not node.wakuRelay.isNil():
+ for topic in node.wakuRelay.getSubscribedTopics():
+ let relayShard = RelayShard.parse(topic).valueOr:
+ error "Invalid subscribed topic", error = error, topic = topic
+ continue
+ let peers =
+ node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
+ meshPeers.add(
+ PeersOfShard(
+ shard: relayShard.shardId,
+ peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)),
+ )
+ )
+ return meshPeers
+
proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse:
let peers = populateAdminPeerInfoForAll(node)
@@ -185,19 +221,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
"Error: Relay Protocol is not mounted to the node"
)
- var relayPeers: PeersOfShards = @[]
- for topic in node.wakuRelay.getSubscribedTopics():
- let relayShard = RelayShard.parse(topic).valueOr:
- error "Invalid subscribed topic", error = error, topic = topic
- continue
- let pubsubPeers =
- node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
- relayPeers.add(
- PeersOfShard(
- shard: relayShard.shardId,
- peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)),
- )
- )
+ var relayPeers: PeersOfShards = getRelayPeers(node)
let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr:
error "An error occurred while building the json response: ", error = error
@@ -240,21 +264,9 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
"Error: Relay Protocol is not mounted to the node"
)
- var relayPeers: PeersOfShards = @[]
- for topic in node.wakuRelay.getSubscribedTopics():
- let relayShard = RelayShard.parse(topic).valueOr:
- error "Invalid subscribed topic", error = error, topic = topic
- continue
- let peers =
- node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
- relayPeers.add(
- PeersOfShard(
- shard: relayShard.shardId,
- peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)),
- )
- )
+ var meshPeers: PeersOfShards = getMeshPeers(node)
- let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr:
+ let resp = RestApiResponse.jsonResponse(meshPeers, status = Http200).valueOr:
error "An error occurred while building the json response: ", error = error
return RestApiResponse.internalServerError(
fmt("An error occurred while building the json response: {error}")
@@ -289,6 +301,75 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
return resp
+ router.api(MethodGet, ROUTE_ADMIN_V1_PEERS_STATS) do() -> RestApiResponse:
+ let peers = populateAdminPeerInfoForAll(node)
+
+ var stats: PeerStats = initOrderedTable[string, OrderedTable[string, int]]()
+
+ stats["Sum"] = {"Total peers": peers.len()}.toOrderedTable()
+
+ # stats of connectedness
+ var connectednessStats = initOrderedTable[string, int]()
+ connectednessStats[$Connectedness.Connected] =
+ peers.countIt(it.connected == Connectedness.Connected)
+ connectednessStats[$Connectedness.NotConnected] =
+ peers.countIt(it.connected == Connectedness.NotConnected)
+ connectednessStats[$Connectedness.CannotConnect] =
+ peers.countIt(it.connected == Connectedness.CannotConnect)
+ connectednessStats[$Connectedness.CanConnect] =
+ peers.countIt(it.connected == Connectedness.CanConnect)
+ stats["By Connectedness"] = connectednessStats
+
+ # stats of relay peers
+ var totalRelayPeers = 0
+ stats["Relay peers"] = block:
+ let relayPeers = getRelayPeers(node)
+ var stat = initOrderedTable[string, int]()
+ for ps in relayPeers:
+ totalRelayPeers += ps.peers.len
+ stat[$ps.shard] = ps.peers.len
+ stat["Total relay peers"] = relayPeers.len
+ stat
+
+ # stats of mesh peers
+ stats["Mesh peers"] = block:
+ let meshPeers = getMeshPeers(node)
+ var totalMeshPeers = 0
+ var stat = initOrderedTable[string, int]()
+ for ps in meshPeers:
+ totalMeshPeers += ps.peers.len
+ stat[$ps.shard] = ps.peers.len
+ stat["Total mesh peers"] = meshPeers.len
+ stat
+
+ var protoStats = initOrderedTable[string, int]()
+ protoStats[WakuRelayCodec] = peers.countIt(it.protocols.contains(WakuRelayCodec))
+ protoStats[WakuFilterSubscribeCodec] =
+ peers.countIt(it.protocols.contains(WakuFilterSubscribeCodec))
+ protoStats[WakuFilterPushCodec] =
+ peers.countIt(it.protocols.contains(WakuFilterPushCodec))
+ protoStats[WakuStoreCodec] = peers.countIt(it.protocols.contains(WakuStoreCodec))
+ protoStats[WakuLegacyStoreCodec] =
+ peers.countIt(it.protocols.contains(WakuLegacyStoreCodec))
+ protoStats[WakuLightPushCodec] =
+ peers.countIt(it.protocols.contains(WakuLightPushCodec))
+ protoStats[WakuLegacyLightPushCodec] =
+ peers.countIt(it.protocols.contains(WakuLegacyLightPushCodec))
+ protoStats[WakuPeerExchangeCodec] =
+ peers.countIt(it.protocols.contains(WakuPeerExchangeCodec))
+ protoStats[WakuReconciliationCodec] =
+ peers.countIt(it.protocols.contains(WakuReconciliationCodec))
+
+ stats["By Protocols"] = protoStats
+
+ let resp = RestApiResponse.jsonResponse(stats, status = Http200).valueOr:
+ error "An error occurred while building the json response: ", error = error
+ return RestApiResponse.internalServerError(
+ fmt("An error occurred while building the json response: {error}")
+ )
+
+ return resp
+
proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) =
router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do(
contentBody: Option[ContentBody]
diff --git a/waku/waku_api/rest/admin/types.nim b/waku/waku_api/rest/admin/types.nim
index 0c0786e3d..483acf8b8 100644
--- a/waku/waku_api/rest/admin/types.nim
+++ b/waku/waku_api/rest/admin/types.nim
@@ -35,6 +35,9 @@ type FilterSubscription* = object
peerId*: string
filterCriteria*: seq[FilterTopic]
+type PeerStats* = OrderedTable[string, OrderedTable[string, int]]
+ # maps a high-level grouping name to a table of named counters
+
#### Serialization and deserialization
proc writeValue*(
writer: var JsonWriter[RestJson], value: WakuPeer
@@ -73,6 +76,23 @@ proc writeValue*(
writer.writeField("filterCriteria", value.filterCriteria)
writer.endRecord()
+proc writeValue*(
+ writer: var JsonWriter[RestJson], value: OrderedTable[string, int]
+) {.raises: [IOError].} =
+ writer.beginRecord()
+ for key, value in value.pairs:
+ writer.writeField(key, value)
+ writer.endRecord()
+
+proc writeValue*(
+ writer: var JsonWriter[RestJson],
+ value: OrderedTable[string, OrderedTable[string, int]],
+) {.raises: [IOError].} =
+ writer.beginRecord()
+ for group, subTab in value.pairs:
+ writer.writeField(group, subTab)
+ writer.endRecord()
+
proc readValue*(
reader: var JsonReader[RestJson], value: var WakuPeer
) {.gcsafe, raises: [SerializationError, IOError].} =
@@ -238,6 +258,21 @@ proc readValue*(
value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get())
+proc readValue*(
+ reader: var JsonReader[RestJson], value: var OrderedTable[string, int]
+) {.gcsafe, raises: [SerializationError, IOError].} =
+ for fieldName in readObjectFields(reader):
+ let fieldValue = reader.readValue(int)
+ value[fieldName] = fieldValue
+
+proc readValue*(
+ reader: var JsonReader[RestJson],
+ value: var OrderedTable[string, OrderedTable[string, int]],
+) {.gcsafe, raises: [SerializationError, IOError].} =
+ for fieldName in readObjectFields(reader):
+ let fieldValue = reader.readValue(OrderedTable[string, int])
+ value[fieldName] = fieldValue
+
func `==`*(a, b: WakuPeer): bool {.inline.} =
return a.multiaddr == b.multiaddr
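For context on the new /admin/v1/peers/stats response shape: PeerStats is a two-level table, so the writeValue/readValue overloads above emit one JSON object per grouping with plain integer counters inside. A minimal sketch, independent of the node, of building the same structure the handler assembles; the keys are illustrative and mirror the handler above:

import std/tables

type PeerStats = OrderedTable[string, OrderedTable[string, int]]

var stats: PeerStats = initOrderedTable[string, OrderedTable[string, int]]()
stats["Sum"] = {"Total peers": 12}.toOrderedTable()
stats["By Connectedness"] = {"Connected": 7, "NotConnected": 5}.toOrderedTable()

# With the serializers above, this is expected to encode as nested JSON objects, e.g.
# {"Sum":{"Total peers":12},"By Connectedness":{"Connected":7,"NotConnected":5}}
echo stats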
From d4198c08ae0480d3d9e0bb9b2fa382fd85ca40aa Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Wed, 11 Jun 2025 15:56:25 +0200
Subject: [PATCH 16/47] fix: discv5 protocol id in libwaku (#3447)
---
waku.nimble | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/waku.nimble b/waku.nimble
index 6ec05caaf..ae84d7a7a 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -66,11 +66,11 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & name &
- ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " &
+ ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
- ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " &
+ ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
proc buildMobileAndroid(srcDir = ".", params = "") =
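The added -d:discv5_protocol_id=d5waku define makes libwaku builds advertise the same Waku-specific discv5 protocol id as the regular node build, instead of the library default. A minimal sketch of the compile-time string-define mechanism the flag relies on; the constant name below is illustrative, not the actual identifier inside the discv5 implementation:

# Built normally, this prints the fallback; built with
#   nim c -d:discv5_protocol_id=d5waku example.nim
# it prints "d5waku".
const discv5_protocol_id {.strdefine.} = "discv5"

echo "discv5 protocol id: ", discv5_protocol_id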
From 17c842a5423821d06c9c531bd700ef29eadd6d55 Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Thu, 12 Jun 2025 10:50:08 +0200
Subject: [PATCH 17/47] feat: dynamic logging via REST API (#3451)
* Added /admin/v1/log-level/{logLevel} endpoint that is used for dynamic log level setting
Credits to @darshankabariya for co-authoring:
* Adapted conditional compile switch check from Darshan's solution
* formatting fix
---
waku/waku_api/rest/admin/handlers.nim | 37 +++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim
index ba401de25..6bf44e8a2 100644
--- a/waku/waku_api/rest/admin/handlers.nim
+++ b/waku/waku_api/rest/admin/handlers.nim
@@ -3,6 +3,7 @@
import
std/[sets, strformat, sequtils, tables],
chronicles,
+ chronicles/topics_registry,
json_serialization,
presto/route,
libp2p/[peerinfo, switch, peerid, protocols/pubsub/pubsubpeer]
@@ -48,6 +49,9 @@ const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}"
const ROUTE_ADMIN_V1_FILTER_SUBS* = "/admin/v1/filter/subscriptions"
+const ROUTE_ADMIN_V1_POST_LOG_LEVEL* = "/admin/v1/log-level/{logLevel}"
+ # sets the new log level for the node
+
type PeerProtocolTuple =
tuple[
multiaddr: string,
@@ -418,7 +422,40 @@ proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode)
return resp.get()
+proc installAdminV1PostLogLevelHandler(router: var RestRouter, node: WakuNode) =
+ router.api(MethodPost, ROUTE_ADMIN_V1_POST_LOG_LEVEL) do(
+ logLevel: string
+ ) -> RestApiResponse:
+ when runtimeFilteringEnabled:
+ if logLevel.isErr() or logLevel.value().isEmptyOrWhitespace():
+ return RestApiResponse.badRequest("Invalid log-level, it can’t be empty")
+
+ try:
+ let newLogLevel = parseEnum[LogLevel](logLevel.value().capitalizeAscii())
+
+ if newLogLevel < enabledLogLevel:
+ return RestApiResponse.badRequest(
+ fmt(
+ "Log level {newLogLevel} is lower than the minimum log level ({enabledLogLevel}) the binary was compiled with."
+ )
+ )
+
+ setLogLevel(newLogLevel)
+ except ValueError:
+ return RestApiResponse.badRequest(
+ fmt(
+ "Invalid log-level: {logLevel.value()}. Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL"
+ )
+ )
+
+ return RestApiResponse.ok()
+ else:
+ return RestApiResponse.serviceUnavailable(
+ "Dynamic log level management is not enabled in this build. Please recompile with `-d:chronicles_runtime_filtering:on`."
+ )
+
proc installAdminApiHandlers*(router: var RestRouter, node: WakuNode) =
installAdminV1GetPeersHandler(router, node)
installAdminV1PostPeersHandler(router, node)
installAdminV1GetFilterSubsHandler(router, node)
+ installAdminV1PostLogLevelHandler(router, node)
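The handler above only accepts levels at or above the compile-time floor, and normalizes the path parameter before parsing it. A minimal sketch of that normalization flow, assuming chronicles is built with -d:chronicles_runtime_filtering:on so that setLogLevel and enabledLogLevel are available:

import std/strutils
import chronicles, chronicles/topics_registry

let requested = "debug"                # value of the {logLevel} path parameter
# parseEnum raises ValueError for unknown names, which the handler maps to a 400 response
let newLogLevel = parseEnum[LogLevel](requested.capitalizeAscii())
if newLogLevel >= enabledLogLevel:     # cannot drop below the compiled-in minimum
  setLogLevel(newLogLevel)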
From 25a3f4192c61750d727626550692e18e05fd6beb Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Thu, 12 Jun 2025 12:49:05 +0200
Subject: [PATCH 18/47] feat: retrieve metrics from libwaku (#3452)
---
examples/cbindings/waku_example.c | 9 +++++++--
library/libwaku.h | 4 ++++
library/libwaku.nim | 14 ++++++++++++++
.../requests/debug_node_request.nim | 16 +++++++++++++++-
vendor/nim-metrics | 2 +-
waku.nimble | 4 ++--
6 files changed, 43 insertions(+), 6 deletions(-)
diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c
index b80b9af8f..1f6f0256a 100644
--- a/examples/cbindings/waku_example.c
+++ b/examples/cbindings/waku_example.c
@@ -305,10 +305,10 @@ int main(int argc, char** argv) {
\"storeMessageDbUrl\": \"%s\", \
\"storeMessageRetentionPolicy\": \"%s\", \
\"storeMaxNumDbConnections\": %d , \
- \"logLevel\": \"FATAL\", \
+ \"logLevel\": \"DEBUG\", \
\"discv5Discovery\": true, \
\"discv5BootstrapNodes\": \
- [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
+ [\"enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuEAoShWGyN66wwusE3Ri8hXBaIkoHZHybUB8cCPv5v3ypEf9OCg4cfslJxZFANl90s-jmMOugLUyBx4EfOBNJ6_VAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\"], \
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
@@ -353,6 +353,11 @@ int main(int argc, char** argv) {
show_main_menu();
while(1) {
handle_user_input();
+
+ // Uncomment the following if you need to test metrics retrieval
+ // WAKU_CALL( waku_get_metrics(ctx,
+ // event_handler,
+ // userData) );
}
pthread_mutex_destroy(&mutex);
diff --git a/library/libwaku.h b/library/libwaku.h
index a95cbda90..525fec69a 100644
--- a/library/libwaku.h
+++ b/library/libwaku.h
@@ -225,6 +225,10 @@ int waku_get_my_peerid(void* ctx,
WakuCallBack callback,
void* userData);
+int waku_get_metrics(void* ctx,
+ WakuCallBack callback,
+ void* userData);
+
int waku_peer_exchange_request(void* ctx,
int numPeers,
WakuCallBack callback,
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 69d523d14..3774ad0a8 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -796,6 +796,20 @@ proc waku_get_my_peerid(
userData,
)
+proc waku_get_metrics(
+ ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
+): cint {.dynlib, exportc.} =
+ initializeLibrary()
+ checkLibwakuParams(ctx, callback, userData)
+
+ handleRequest(
+ ctx,
+ RequestType.DEBUG,
+ DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS),
+ callback,
+ userData,
+ )
+
proc waku_start_discv5(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
diff --git a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
index 53715e0ed..dc0bc72f5 100644
--- a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
@@ -1,11 +1,19 @@
import std/json
-import chronicles, chronos, results, eth/p2p/discoveryv5/enr, strutils, libp2p/peerid
+import
+ chronicles,
+ chronos,
+ results,
+ eth/p2p/discoveryv5/enr,
+ strutils,
+ libp2p/peerid,
+ metrics
import ../../../../waku/factory/waku, ../../../../waku/node/waku_node
type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
RETRIEVE_MY_ENR
RETRIEVE_MY_PEER_ID
+ RETRIEVE_METRICS
type DebugNodeRequest* = object
operation: DebugNodeMsgType
@@ -21,6 +29,10 @@ proc destroyShared(self: ptr DebugNodeRequest) =
proc getMultiaddresses(node: WakuNode): seq[string] =
return node.info().listenAddresses
+proc getMetrics(): string =
+ {.gcsafe.}:
+ return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
+
proc process*(
self: ptr DebugNodeRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
@@ -35,6 +47,8 @@ proc process*(
return ok(waku.node.enr.toURI())
of RETRIEVE_MY_PEER_ID:
return ok($waku.node.peerId())
+ of RETRIEVE_METRICS:
+ return ok(getMetrics())
error "unsupported operation in DebugNodeRequest"
return err("unsupported operation in DebugNodeRequest")
diff --git a/vendor/nim-metrics b/vendor/nim-metrics
index 11d0cddfb..ecf64c607 160000
--- a/vendor/nim-metrics
+++ b/vendor/nim-metrics
@@ -1 +1 @@
-Subproject commit 11d0cddfb0e711aa2a8c75d1892ae24a64c299fc
+Subproject commit ecf64c6078d1276d3b7d9b3d931fbdb70004db11
diff --git a/waku.nimble b/waku.nimble
index ae84d7a7a..5be212264 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -66,11 +66,11 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & name &
- ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
+ ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & name &
- ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
+ ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
proc buildMobileAndroid(srcDir = ".", params = "") =
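For reference, waku_get_metrics simply returns the text exposition of nim-metrics' default registry, which is why the nimble targets above now build with -d:metrics instead of --undef:metrics. A minimal, standalone sketch of what that call collects; the counter name is made up for illustration:

# Build with: nim c -d:metrics example.nim
import metrics

declareCounter example_requests, "number of example requests handled"

example_requests.inc()

# Same call the RETRIEVE_METRICS handler uses; prints the Prometheus text
# exposition of all registered collectors, including example_requests.
echo defaultRegistry.toText()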
From 0adddb01da0abc36e20a0024f273e8d3765a4a74 Mon Sep 17 00:00:00 2001
From: Darshan K <35736874+darshankabariya@users.noreply.github.com>
Date: Fri, 13 Jun 2025 15:08:47 +0530
Subject: [PATCH 19/47] chore: rest-relay-cache-capacity (#3454)
---
waku/factory/external_config.nim | 2 +-
waku/waku_api/rest/builder.nim | 3 +--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 9617d4403..f85a9fae3 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -503,7 +503,7 @@ with the drawback of consuming some more bandwidth.""",
restRelayCacheCapacity* {.
desc: "Capacity of the Relay REST API message cache.",
- defaultValue: 30,
+ defaultValue: 50,
name: "rest-relay-cache-capacity"
.}: uint32
diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim
index f11a11fbc..1b5d9af70 100644
--- a/waku/waku_api/rest/builder.nim
+++ b/waku/waku_api/rest/builder.nim
@@ -139,8 +139,7 @@ proc startRestServerProtocolSupport*(
if relayEnabled:
## This MessageCache is used, f.e., in js-waku<>nwaku interop tests.
## js-waku tests asks nwaku-docker through REST whether a message is properly received.
- const RestRelayCacheCapacity = 50
- let cache = MessageCache.init(int(RestRelayCacheCapacity))
+ let cache = MessageCache.init(int(conf.relayCacheCapacity))
let handler: WakuRelayHandler = messageCacheHandler(cache)
From 11b44e3e15c65d80cadf2ab8a51afac94098c24c Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Sat, 14 Jun 2025 10:09:51 +0200
Subject: [PATCH 20/47] chore: refactor rm discv5-only (#3453)
---
waku/discovery/waku_discv5.nim | 1 -
waku/factory/conf_builder/discv5_conf_builder.nim | 5 -----
waku/factory/conf_builder/waku_conf_builder.nim | 1 -
waku/factory/external_config.nim | 7 -------
waku/factory/node_factory.nim | 4 ----
waku/factory/waku.nim | 11 +++++------
6 files changed, 5 insertions(+), 24 deletions(-)
diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim
index 221acef42..5bdb91a2e 100644
--- a/waku/discovery/waku_discv5.nim
+++ b/waku/discovery/waku_discv5.nim
@@ -26,7 +26,6 @@ logScope:
type Discv5Conf* {.requiresInit.} = object
# TODO: This should probably be an option on the builder
# But translated to everything else "false" on the config
- discv5Only*: bool
bootstrapNodes*: seq[string]
udpPort*: Port
tableIpLimit*: uint
diff --git a/waku/factory/conf_builder/discv5_conf_builder.nim b/waku/factory/conf_builder/discv5_conf_builder.nim
index 950b2a4f6..30755669b 100644
--- a/waku/factory/conf_builder/discv5_conf_builder.nim
+++ b/waku/factory/conf_builder/discv5_conf_builder.nim
@@ -13,7 +13,6 @@ type Discv5ConfBuilder* = object
bootstrapNodes*: seq[string]
bitsPerHop*: Option[int]
bucketIpLimit*: Option[uint]
- discv5Only*: Option[bool]
enrAutoUpdate*: Option[bool]
tableIpLimit*: Option[uint]
udpPort*: Option[Port]
@@ -30,9 +29,6 @@ proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) =
proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) =
b.bucketIpLimit = some(bucketIpLimit)
-proc withDiscv5Only*(b: var Discv5ConfBuilder, discv5Only: bool) =
- b.discv5Only = some(discv5Only)
-
proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) =
b.enrAutoUpdate = some(enrAutoUpdate)
@@ -56,7 +52,6 @@ proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] =
bootstrapNodes: b.bootstrapNodes,
bitsPerHop: b.bitsPerHop.get(1),
bucketIpLimit: b.bucketIpLimit.get(2),
- discv5Only: b.discv5Only.get(false),
enrAutoUpdate: b.enrAutoUpdate.get(true),
tableIpLimit: b.tableIpLimit.get(10),
udpPort: b.udpPort.get(9000.Port),
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index f1f0ba471..46e303e70 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -82,7 +82,6 @@ type WakuConfBuilder* = object
# TODO: move within a relayConf
rendezvous: Option[bool]
- discv5Only: Option[bool]
clusterConf: Option[ClusterConf]
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index f85a9fae3..a9e828893 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -610,12 +610,6 @@ with the drawback of consuming some more bandwidth.""",
name: "discv5-bits-per-hop"
.}: int
- discv5Only* {.
- desc: "Disable all protocols other than discv5",
- defaultValue: false,
- name: "discv5-only"
- .}: bool
-
## waku peer exchange config
peerExchange* {.
desc: "Enable waku peer exchange protocol (responder side): true|false",
@@ -1021,7 +1015,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit)
b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit)
b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop)
- b.discv5Conf.withDiscv5Only(n.discv5Only)
b.withPeerExchange(n.peerExchange)
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 5dc549317..7063f8476 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -151,10 +151,6 @@ proc setupProtocols(
## Optionally include persistent message storage.
## No protocols are started yet.
- if conf.discv5Conf.isSome() and conf.discv5Conf.get().discv5Only:
- notice "Running node only with Discv5, not mounting additional protocols"
- return ok()
-
node.mountMetadata(conf.clusterId).isOkOr:
return err("failed to mount waku metadata protocol: " & error)
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index fe797b0a3..006093648 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -361,13 +361,12 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
else:
waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
- if conf.discv5Conf.isNone or not conf.discv5Conf.get().discv5Only:
- (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
- return err("error while calling startNode: " & $error)
+ (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
+ return err("error while calling startNode: " & $error)
- # Update waku data that is set dynamically on node start
- updateWaku(waku).isOkOr:
- return err("Error in updateApp: " & $error)
+ ## Update waku data that is set dynamically on node start
+ updateWaku(waku).isOkOr:
+ return err("Error in updateApp: " & $error)
## Discv5
if conf.discv5Conf.isSome:
From d148c536ca952848851ddcb5b60537f24a96fc2a Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Mon, 16 Jun 2025 12:46:20 +0200
Subject: [PATCH 21/47] feat: lightpush v3 for lite-protocol-tester (#3455)
* Upgrade lpt to new config methods
* Make choice of legacy and v3 lightpush configurable on cli
* Adjust runner script to allow easy lightpush version selection
* Prepare selectable lightpush for infra env runs
* Fix misused result vs return
* Fixes and more explanatory comments added
* Replace the "pure virtual" notice in the base method with a plain discard
---
apps/liteprotocoltester/legacy_publisher.nim | 24 +++++
.../liteprotocoltester/liteprotocoltester.nim | 102 ++++++++++--------
apps/liteprotocoltester/lpt_supervisor.py | 4 +-
...{lightpush_publisher.nim => publisher.nim} | 31 +++---
apps/liteprotocoltester/publisher_base.nim | 14 +++
.../{filter_subscriber.nim => receiver.nim} | 2 +-
apps/liteprotocoltester/run_tester_node.sh | 7 +-
.../run_tester_node_at_infra.sh | 10 +-
.../run_tester_node_on_fleet.sh | 10 +-
.../service_peer_management.nim | 4 +-
apps/liteprotocoltester/tester_config.nim | 20 ++++
apps/liteprotocoltester/v3_publisher.nim | 29 +++++
12 files changed, 192 insertions(+), 65 deletions(-)
create mode 100644 apps/liteprotocoltester/legacy_publisher.nim
rename apps/liteprotocoltester/{lightpush_publisher.nim => publisher.nim} (92%)
create mode 100644 apps/liteprotocoltester/publisher_base.nim
rename apps/liteprotocoltester/{filter_subscriber.nim => receiver.nim} (99%)
create mode 100644 apps/liteprotocoltester/v3_publisher.nim
diff --git a/apps/liteprotocoltester/legacy_publisher.nim b/apps/liteprotocoltester/legacy_publisher.nim
new file mode 100644
index 000000000..12733ad2d
--- /dev/null
+++ b/apps/liteprotocoltester/legacy_publisher.nim
@@ -0,0 +1,24 @@
+import chronos, results, options
+import waku/[waku_node, waku_core]
+import publisher_base
+
+type LegacyPublisher* = ref object of PublisherBase
+
+proc new*(T: type LegacyPublisher, wakuNode: WakuNode): T =
+ if isNil(wakuNode.wakuLegacyLightpushClient):
+ wakuNode.mountLegacyLightPushClient()
+
+ return LegacyPublisher(wakuNode: wakuNode)
+
+method send*(
+ self: LegacyPublisher,
+ topic: PubsubTopic,
+ message: WakuMessage,
+ servicePeer: RemotePeerInfo,
+): Future[Result[void, string]] {.async.} =
+ # On error, return the original error description as-is: the text is used to distinguish between error types in metrics.
+ discard (
+ await self.wakuNode.legacyLightpushPublish(some(topic), message, servicePeer)
+ ).valueOr:
+ return err(error)
+ return ok()
diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim
index 991e9ba78..598d1a7ec 100644
--- a/apps/liteprotocoltester/liteprotocoltester.nim
+++ b/apps/liteprotocoltester/liteprotocoltester.nim
@@ -28,8 +28,8 @@ import
waku_core/multiaddrstr,
],
./tester_config,
- ./lightpush_publisher,
- ./filter_subscriber,
+ ./publisher,
+ ./receiver,
./diagnose_connections,
./service_peer_management
@@ -69,13 +69,13 @@ when isMainModule:
## - override according to tester functionality
##
- var wakuConf: WakuNodeConf
+ var wConf: WakuNodeConf
if conf.configFile.isSome():
try:
var configFile {.threadvar.}: InputFile
configFile = conf.configFile.get()
- wakuConf = WakuNodeConf.load(
+ wConf = WakuNodeConf.load(
version = versionString,
printUsage = false,
secondarySources = proc(
@@ -88,36 +88,36 @@ when isMainModule:
error "Loading Waku configuration failed", error = getCurrentExceptionMsg()
quit(QuitFailure)
- wakuConf.logLevel = conf.logLevel
- wakuConf.logFormat = conf.logFormat
- wakuConf.nat = conf.nat
- wakuConf.maxConnections = 500
- wakuConf.restAddress = conf.restAddress
- wakuConf.restPort = conf.restPort
- wakuConf.restAllowOrigin = conf.restAllowOrigin
+ wConf.logLevel = conf.logLevel
+ wConf.logFormat = conf.logFormat
+ wConf.nat = conf.nat
+ wConf.maxConnections = 500
+ wConf.restAddress = conf.restAddress
+ wConf.restPort = conf.restPort
+ wConf.restAllowOrigin = conf.restAllowOrigin
- wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
+ wConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
- wakuConf.shards = @[conf.shard]
- wakuConf.contentTopics = conf.contentTopics
- wakuConf.clusterId = conf.clusterId
+ wConf.shards = @[conf.shard]
+ wConf.contentTopics = conf.contentTopics
+ wConf.clusterId = conf.clusterId
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
- wakuConf.metricsServer = true
- wakuConf.metricsServerAddress = parseIpAddress("0.0.0.0")
- wakuConf.metricsServerPort = conf.metricsPort
+ wConf.metricsServer = true
+ wConf.metricsServerAddress = parseIpAddress("0.0.0.0")
+ wConf.metricsServerPort = conf.metricsPort
# If bootstrap option is chosen we expect our clients will not mounted
# so we will mount PeerExchange manually to gather possible service peers,
# if got some we will mount the client protocols afterward.
- wakuConf.peerExchange = false
- wakuConf.relay = false
- wakuConf.filter = false
- wakuConf.lightpush = false
- wakuConf.store = false
+ wConf.peerExchange = false
+ wConf.relay = false
+ wConf.filter = false
+ wConf.lightpush = false
+ wConf.store = false
- wakuConf.rest = false
- wakuConf.relayServiceRatio = "40:60"
+ wConf.rest = false
+ wConf.relayServiceRatio = "40:60"
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
# It will always be called from main thread anyway.
@@ -126,12 +126,20 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
- let restServer = rest_server_builder.startRestServerEssentials(
- nodeHealthMonitor, wakuConf
- ).valueOr:
- error "Starting esential REST server failed.", error = $error
+ let wakuConf = wConf.toWakuConf().valueOr:
+ error "Waku configuration failed", error = error
quit(QuitFailure)
+ let restServer: WakuRestServerRef =
+ if wakuConf.restServerConf.isSome():
+ rest_server_builder.startRestServerEssentials(
+ nodeHealthMonitor, wakuConf.restServerConf.get(), wakuConf.portsShift
+ ).valueOr:
+ error "Starting essential REST server failed.", error = $error
+ quit(QuitFailure)
+ else:
+ nil
+
var wakuApp = Waku.new(wakuConf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@@ -144,15 +152,27 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)
- rest_server_builder.startRestServerProtocolSupport(
- restServer, wakuApp.node, wakuApp.wakuDiscv5, wakuConf
- ).isOkOr:
- error "Starting protocols support REST server failed.", error = $error
- quit(QuitFailure)
+ if wakuConf.restServerConf.isSome():
+ rest_server_builder.startRestServerProtocolSupport(
+ restServer,
+ wakuApp.node,
+ wakuApp.wakuDiscv5,
+ wakuConf.restServerConf.get(),
+ wakuConf.relay,
+ wakuConf.lightPush,
+ wakuConf.clusterId,
+ wakuConf.shards,
+ wakuConf.contentTopics,
+ ).isOkOr:
+ error "Starting protocols support REST server failed.", error = $error
+ quit(QuitFailure)
- wakuApp.metricsServer = waku_metrics.startMetricsServerAndLogging(wakuConf).valueOr:
- error "Starting monitoring and external interfaces failed", error = error
- quit(QuitFailure)
+ if wakuConf.metricsServerConf.isSome():
+ wakuApp.metricsServer = waku_metrics.startMetricsServerAndLogging(
+ wakuConf.metricsServerConf.get(), wakuConf.portsShift
+ ).valueOr:
+ error "Starting monitoring and external interfaces failed", error = error
+ quit(QuitFailure)
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
@@ -199,12 +219,8 @@ when isMainModule:
info "Node setup complete"
- var codec = WakuLightPushCodec
+ let codec = conf.getCodec()
# mounting relevant client, for PX filter client must be mounted ahead
- if conf.testFunc == TesterFunctionality.SENDER:
- codec = WakuLightPushCodec
- else:
- codec = WakuFilterSubscribeCodec
var lookForServiceNode = false
var serviceNodePeerInfo: RemotePeerInfo
@@ -241,6 +257,6 @@ when isMainModule:
if conf.testFunc == TesterFunctionality.SENDER:
setupAndPublish(wakuApp.node, conf, serviceNodePeerInfo)
else:
- setupAndSubscribe(wakuApp.node, conf, serviceNodePeerInfo)
+ setupAndListen(wakuApp.node, conf, serviceNodePeerInfo)
runForever()
diff --git a/apps/liteprotocoltester/lpt_supervisor.py b/apps/liteprotocoltester/lpt_supervisor.py
index 24c395b0a..7d882afd2 100755
--- a/apps/liteprotocoltester/lpt_supervisor.py
+++ b/apps/liteprotocoltester/lpt_supervisor.py
@@ -24,8 +24,8 @@ def run_tester_node(predefined_test_env):
return os.system(script_cmd)
if __name__ == "__main__":
- if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER"]:
- print("Error: First argument must be either 'RECEIVER' or 'SENDER'")
+ if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER", "SENDERV3"]:
+ print("Error: First argument must be either 'RECEIVER' or 'SENDER' or 'SENDERV3'")
sys.exit(1)
predefined_test_env_file = '/usr/bin/infra.env'
diff --git a/apps/liteprotocoltester/lightpush_publisher.nim b/apps/liteprotocoltester/publisher.nim
similarity index 92%
rename from apps/liteprotocoltester/lightpush_publisher.nim
rename to apps/liteprotocoltester/publisher.nim
index d79e68590..d8031473d 100644
--- a/apps/liteprotocoltester/lightpush_publisher.nim
+++ b/apps/liteprotocoltester/publisher.nim
@@ -21,14 +21,17 @@ import
./tester_message,
./lpt_metrics,
./diagnose_connections,
- ./service_peer_management
+ ./service_peer_management,
+ ./publisher_base,
+ ./legacy_publisher,
+ ./v3_publisher
randomize()
type SizeRange* = tuple[min: uint64, max: uint64]
-var RANDOM_PALYLOAD {.threadvar.}: seq[byte]
-RANDOM_PALYLOAD = urandom(1024 * 1024)
+var RANDOM_PAYLOAD {.threadvar.}: seq[byte]
+RANDOM_PAYLOAD = urandom(1024 * 1024)
# 1MiB of random payload to be used to extend message
proc prepareMessage(
@@ -59,9 +62,8 @@ proc prepareMessage(
if renderSize < len(contentPayload).uint64:
renderSize = len(contentPayload).uint64
- let finalPayload = concat(
- contentPayload, RANDOM_PALYLOAD[0 .. renderSize - len(contentPayload).uint64]
- )
+ let finalPayload =
+ concat(contentPayload, RANDOM_PAYLOAD[0 .. renderSize - len(contentPayload).uint64])
let message = WakuMessage(
payload: finalPayload, # content of the message
contentTopic: contentTopic, # content topic to publish to
@@ -108,6 +110,7 @@ proc reportSentMessages() =
proc publishMessages(
wakuNode: WakuNode,
+ publisher: PublisherBase,
servicePeer: RemotePeerInfo,
lightpushPubsubTopic: PubsubTopic,
lightpushContentTopic: ContentTopic,
@@ -148,9 +151,7 @@ proc publishMessages(
let publishStartTime = Moment.now()
- let wlpRes = await wakuNode.legacyLightpushPublish(
- some(lightpushPubsubTopic), message, actualServicePeer
- )
+ let wlpRes = await publisher.send(lightpushPubsubTopic, message, actualServicePeer)
let publishDuration = Moment.now() - publishStartTime
@@ -213,10 +214,13 @@ proc publishMessages(
proc setupAndPublish*(
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
) =
- if isNil(wakuNode.wakuLightpushClient):
- # if we have not yet initialized lightpush client, then do it as the only way we can get here is
- # by having a service peer discovered.
- wakuNode.mountLegacyLightPushClient()
+ var publisher: PublisherBase
+ if conf.lightpushVersion == LightpushVersion.LEGACY:
+ info "Using legacy lightpush protocol for publishing messages"
+ publisher = LegacyPublisher.new(wakuNode)
+ else:
+ info "Using lightpush v3 protocol for publishing messages"
+ publisher = V3Publisher.new(wakuNode)
# give some time to receiver side to set up
let waitTillStartTesting = conf.startPublishingAfter.seconds
@@ -257,6 +261,7 @@ proc setupAndPublish*(
# Start maintaining subscription
asyncSpawn publishMessages(
wakuNode,
+ publisher,
servicePeer,
conf.getPubsubTopic(),
conf.contentTopics[0],
diff --git a/apps/liteprotocoltester/publisher_base.nim b/apps/liteprotocoltester/publisher_base.nim
new file mode 100644
index 000000000..de88d82f8
--- /dev/null
+++ b/apps/liteprotocoltester/publisher_base.nim
@@ -0,0 +1,14 @@
+import chronos, results
+import waku/[waku_node, waku_core]
+
+type PublisherBase* = ref object of RootObj
+ wakuNode*: WakuNode
+
+method send*(
+ self: PublisherBase,
+ topic: PubsubTopic,
+ message: WakuMessage,
+ servicePeer: RemotePeerInfo,
+): Future[Result[void, string]] {.base, async.} =
+ discard
+ # On error, return the original error description as-is: the text is used to distinguish between error types in metrics.
diff --git a/apps/liteprotocoltester/filter_subscriber.nim b/apps/liteprotocoltester/receiver.nim
similarity index 99%
rename from apps/liteprotocoltester/filter_subscriber.nim
rename to apps/liteprotocoltester/receiver.nim
index fbb11c92e..f0f41b1c5 100644
--- a/apps/liteprotocoltester/filter_subscriber.nim
+++ b/apps/liteprotocoltester/receiver.nim
@@ -116,7 +116,7 @@ proc maintainSubscription(
await sleepAsync(30.seconds) # Subscription maintenance interval
-proc setupAndSubscribe*(
+proc setupAndListen*(
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
) =
if isNil(wakuNode.wakuFilterClient):
diff --git a/apps/liteprotocoltester/run_tester_node.sh b/apps/liteprotocoltester/run_tester_node.sh
index 4a80ca460..3c2d60e2f 100755
--- a/apps/liteprotocoltester/run_tester_node.sh
+++ b/apps/liteprotocoltester/run_tester_node.sh
@@ -25,7 +25,12 @@ fi
FUNCTION=$2
if [ "${FUNCTION}" = "SENDER" ]; then
- FUNCTION=--test-func=SENDER
+ FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
+ SERVICENAME=lightpush-service
+fi
+
+if [ "${FUNCTION}" = "SENDERV3" ]; then
+ FUNCTION="--test-func=SENDER --lightpush-version=V3"
SERVICENAME=lightpush-service
fi
diff --git a/apps/liteprotocoltester/run_tester_node_at_infra.sh b/apps/liteprotocoltester/run_tester_node_at_infra.sh
index e926875aa..db26eb091 100644
--- a/apps/liteprotocoltester/run_tester_node_at_infra.sh
+++ b/apps/liteprotocoltester/run_tester_node_at_infra.sh
@@ -26,7 +26,15 @@ fi
FUNCTION=$2
if [ "${FUNCTION}" = "SENDER" ]; then
- FUNCTION=--test-func=SENDER
+ FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
+ SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
+ NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
+ NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
+ METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
+fi
+
+if [ "${FUNCTION}" = "SENDERV3" ]; then
+ FUNCTION="--test-func=SENDER --lightpush-version=V3"
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
diff --git a/apps/liteprotocoltester/run_tester_node_on_fleet.sh b/apps/liteprotocoltester/run_tester_node_on_fleet.sh
index 538a890e6..533f5b1bf 100644
--- a/apps/liteprotocoltester/run_tester_node_on_fleet.sh
+++ b/apps/liteprotocoltester/run_tester_node_on_fleet.sh
@@ -26,7 +26,15 @@ fi
FUNCTION=$2
if [ "${FUNCTION}" = "SENDER" ]; then
- FUNCTION=--test-func=SENDER
+ FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
+ SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
+ NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
+ NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
+ METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
+fi
+
+if [ "${FUNCTION}" = "SENDERV3" ]; then
+ FUNCTION="--test-func=SENDER --lightpush-version=V3"
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim
index a303c3c58..7d79e0f36 100644
--- a/apps/liteprotocoltester/service_peer_management.nim
+++ b/apps/liteprotocoltester/service_peer_management.nim
@@ -158,9 +158,7 @@ proc tryCallAllPxPeers*(
proc pxLookupServiceNode*(
node: WakuNode, conf: LiteProtocolTesterConf
): Future[Result[bool, void]] {.async.} =
- var codec: string = WakuLightPushCodec
- if conf.testFunc == TesterFunctionality.RECEIVER:
- codec = WakuFilterSubscribeCodec
+ let codec: string = conf.getCodec()
if node.wakuPeerExchange.isNil():
let peerExchangeNode = translateToRemotePeerInfo(conf.bootstrapNode).valueOr:
diff --git a/apps/liteprotocoltester/tester_config.nim b/apps/liteprotocoltester/tester_config.nim
index eccaafc06..c06a970b1 100644
--- a/apps/liteprotocoltester/tester_config.nim
+++ b/apps/liteprotocoltester/tester_config.nim
@@ -33,6 +33,10 @@ type TesterFunctionality* = enum
SENDER # pumps messages to the network
RECEIVER # gather and analyze messages from the network
+type LightpushVersion* = enum
+ LEGACY # legacy lightpush protocol
+ V3 # lightpush v3 protocol
+
type LiteProtocolTesterConf* = object
configFile* {.
desc:
@@ -80,6 +84,12 @@ type LiteProtocolTesterConf* = object
name: "test-func"
.}: TesterFunctionality
+ lightpushVersion* {.
+ desc: "Version of the sender to use. Supported values: legacy, v3.",
+ defaultValue: LightpushVersion.LEGACY,
+ name: "lightpush-version"
+ .}: LightpushVersion
+
numMessages* {.
desc: "Number of messages to send.", defaultValue: 120, name: "num-messages"
.}: uint32
@@ -190,4 +200,14 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] =
proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic =
return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard)
+proc getCodec*(conf: LiteProtocolTesterConf): string =
+ return
+ if conf.testFunc == TesterFunctionality.RECEIVER:
+ WakuFilterSubscribeCodec
+ else:
+ if conf.lightpushVersion == LightpushVersion.LEGACY:
+ WakuLegacyLightPushCodec
+ else:
+ WakuLightPushCodec
+
{.pop.}
diff --git a/apps/liteprotocoltester/v3_publisher.nim b/apps/liteprotocoltester/v3_publisher.nim
new file mode 100644
index 000000000..74a3fdd05
--- /dev/null
+++ b/apps/liteprotocoltester/v3_publisher.nim
@@ -0,0 +1,29 @@
+import results, options, chronos
+import waku/[waku_node, waku_core, waku_lightpush]
+import publisher_base
+
+type V3Publisher* = ref object of PublisherBase
+
+proc new*(T: type V3Publisher, wakuNode: WakuNode): T =
+ if isNil(wakuNode.wakuLightpushClient):
+ wakuNode.mountLightPushClient()
+
+ return V3Publisher(wakuNode: wakuNode)
+
+method send*(
+ self: V3Publisher,
+ topic: PubsubTopic,
+ message: WakuMessage,
+ servicePeer: RemotePeerInfo,
+): Future[Result[void, string]] {.async.} =
+ # On error, return the original error description as-is: the text is used to distinguish between error types in metrics.
+ discard (
+ await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer))
+ ).valueOr:
+ if error.code == NO_PEERS_TO_RELAY and
+ error.desc != some("No peers for topic, skipping publish"):
+ # TODO: We need better separation between errors happening on the client side and on the server side.
+ return err("dial_failure")
+ else:
+ return err($error.code)
+ return ok()
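The split into PublisherBase with LegacyPublisher and V3Publisher relies on Nim's runtime method dispatch: send is declared as a {.base.} method and overridden per protocol, so publishMessages only ever deals with the base type. A minimal, self-contained sketch of the same pattern without the Waku types:

import chronos, results

type
  Publisher = ref object of RootObj
  LegacyLike = ref object of Publisher
  V3Like = ref object of Publisher

method send(self: Publisher, msg: string): Future[Result[void, string]] {.base, async.} =
  discard

method send(self: LegacyLike, msg: string): Future[Result[void, string]] {.async.} =
  echo "legacy path: ", msg
  return ok()

method send(self: V3Like, msg: string): Future[Result[void, string]] {.async.} =
  echo "v3 path: ", msg
  return ok()

# The caller holds only the base type; the concrete protocol is picked at setup
# time, mirroring how setupAndPublish chooses between LegacyPublisher and V3Publisher.
let publisher: Publisher = V3Like()
discard waitFor publisher.send("hello")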
From 478925a389f6002d83da855b44362298451a616f Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Mon, 16 Jun 2025 18:44:21 +0200
Subject: [PATCH 22/47] chore: refactor to unify online and health monitors
(#3456)
---
.../liteprotocoltester/liteprotocoltester.nim | 137 +++-----
apps/wakunode2/wakunode2.nim | 53 +---
library/libwaku.nim | 4 +-
.../requests/debug_node_request.nim | 8 +-
.../requests/node_lifecycle_request.nim | 5 +-
.../requests/peer_manager_request.nim | 3 -
tests/wakunode_rest/test_rest_health.nim | 4 +-
waku/factory/builder.nim | 1 -
waku/factory/node_factory.nim | 2 +-
waku/factory/waku.nim | 87 ++++--
waku/node/health_monitor.nim | 295 +-----------------
waku/node/health_monitor/health_status.nim | 16 +
.../health_monitor/node_health_monitor.nim | 270 ++++++++++++++++
waku/node/health_monitor/online_monitor.nim | 77 +++++
waku/node/health_monitor/protocol_health.nim | 46 +++
waku/node/peer_manager/peer_manager.nim | 48 +--
waku/waku_api/rest/builder.nim | 2 +-
waku/waku_api/rest/health/handlers.nim | 2 +-
waku/waku_api/rest/health/types.nim | 21 +-
19 files changed, 559 insertions(+), 522 deletions(-)
create mode 100644 waku/node/health_monitor/health_status.nim
create mode 100644 waku/node/health_monitor/node_health_monitor.nim
create mode 100644 waku/node/health_monitor/online_monitor.nim
create mode 100644 waku/node/health_monitor/protocol_health.nim
diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim
index 598d1a7ec..939332cff 100644
--- a/apps/liteprotocoltester/liteprotocoltester.nim
+++ b/apps/liteprotocoltester/liteprotocoltester.nim
@@ -14,13 +14,11 @@ import
waku/[
common/enr,
common/logging,
- factory/waku,
+ factory/waku as waku_factory,
factory/external_config,
waku_node,
- node/health_monitor,
node/waku_metrics,
node/peer_manager,
- waku_api/rest/builder as rest_server_builder,
waku_lightpush/common,
waku_filter_v2,
waku_peer_exchange/protocol,
@@ -49,7 +47,7 @@ when isMainModule:
## 5. Start monitoring tools and external interfaces
## 6. Setup graceful shutdown hooks
- const versionString = "version / git commit hash: " & waku.git_version
+ const versionString = "version / git commit hash: " & waku_factory.git_version
let confRes = LiteProtocolTesterConf.load(version = versionString)
if confRes.isErr():
@@ -61,7 +59,7 @@ when isMainModule:
## Logging setup
logging.setupLog(conf.logLevel, conf.logFormat)
- info "Running Lite Protocol Tester node", version = waku.git_version
+ info "Running Lite Protocol Tester node", version = waku_factory.git_version
logConfig(conf)
##Prepare Waku configuration
@@ -69,13 +67,13 @@ when isMainModule:
## - override according to tester functionality
##
- var wConf: WakuNodeConf
+ var wakuNodeConf: WakuNodeConf
if conf.configFile.isSome():
try:
var configFile {.threadvar.}: InputFile
configFile = conf.configFile.get()
- wConf = WakuNodeConf.load(
+ wakuNodeConf = WakuNodeConf.load(
version = versionString,
printUsage = false,
secondarySources = proc(
@@ -88,101 +86,54 @@ when isMainModule:
error "Loading Waku configuration failed", error = getCurrentExceptionMsg()
quit(QuitFailure)
- wConf.logLevel = conf.logLevel
- wConf.logFormat = conf.logFormat
- wConf.nat = conf.nat
- wConf.maxConnections = 500
- wConf.restAddress = conf.restAddress
- wConf.restPort = conf.restPort
- wConf.restAllowOrigin = conf.restAllowOrigin
+ wakuNodeConf.logLevel = conf.logLevel
+ wakuNodeConf.logFormat = conf.logFormat
+ wakuNodeConf.nat = conf.nat
+ wakuNodeConf.maxConnections = 500
+ wakuNodeConf.restAddress = conf.restAddress
+ wakuNodeConf.restPort = conf.restPort
+ wakuNodeConf.restAllowOrigin = conf.restAllowOrigin
- wConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
+ wakuNodeConf.dnsAddrsNameServers =
+ @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
- wConf.shards = @[conf.shard]
- wConf.contentTopics = conf.contentTopics
- wConf.clusterId = conf.clusterId
+ wakuNodeConf.shards = @[conf.shard]
+ wakuNodeConf.contentTopics = conf.contentTopics
+ wakuNodeConf.clusterId = conf.clusterId
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
- wConf.metricsServer = true
- wConf.metricsServerAddress = parseIpAddress("0.0.0.0")
- wConf.metricsServerPort = conf.metricsPort
+ wakuNodeConf.metricsServer = true
+ wakuNodeConf.metricsServerAddress = parseIpAddress("0.0.0.0")
+ wakuNodeConf.metricsServerPort = conf.metricsPort
# If bootstrap option is chosen we expect our clients will not mounted
# so we will mount PeerExchange manually to gather possible service peers,
# if got some we will mount the client protocols afterward.
- wConf.peerExchange = false
- wConf.relay = false
- wConf.filter = false
- wConf.lightpush = false
- wConf.store = false
+ wakuNodeConf.peerExchange = false
+ wakuNodeConf.relay = false
+ wakuNodeConf.filter = false
+ wakuNodeConf.lightpush = false
+ wakuNodeConf.store = false
- wConf.rest = false
- wConf.relayServiceRatio = "40:60"
+ wakuNodeConf.rest = false
+ wakuNodeConf.relayServiceRatio = "40:60"
- # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
- # It will always be called from main thread anyway.
- # Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety
- var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor
- nodeHealthMonitor = WakuNodeHealthMonitor()
- nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
-
- let wakuConf = wConf.toWakuConf().valueOr:
- error "Waku configuration failed", error = error
+ let wakuConf = wakuNodeConf.toWakuConf().valueOr:
+ error "Issue converting toWakuConf", error = $error
quit(QuitFailure)
- let restServer: WakuRestServerRef =
- if wakuConf.restServerConf.isSome():
- rest_server_builder.startRestServerEssentials(
- nodeHealthMonitor, wakuConf.restServerConf.get(), wakuConf.portsShift
- ).valueOr:
- error "Starting essential REST server failed.", error = $error
- quit(QuitFailure)
- else:
- nil
-
- var wakuApp = Waku.new(wakuConf).valueOr:
+ var waku = Waku.new(wakuConf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
- wakuApp.restServer = restServer
-
- nodeHealthMonitor.setNode(wakuApp.node)
-
- (waitFor startWaku(addr wakuApp)).isOkOr:
+ (waitFor startWaku(addr waku)).isOkOr:
error "Starting waku failed", error = error
quit(QuitFailure)
- if wakuConf.restServerConf.isSome():
- rest_server_builder.startRestServerProtocolSupport(
- restServer,
- wakuApp.node,
- wakuApp.wakuDiscv5,
- wakuConf.restServerConf.get(),
- wakuConf.relay,
- wakuConf.lightPush,
- wakuConf.clusterId,
- wakuConf.shards,
- wakuConf.contentTopics,
- ).isOkOr:
- error "Starting protocols support REST server failed.", error = $error
- quit(QuitFailure)
-
- if wakuConf.metricsServerConf.isSome():
- wakuApp.metricsServer = waku_metrics.startMetricsServerAndLogging(
- wakuConf.metricsServerConf.get(), wakuConf.portsShift
- ).valueOr:
- error "Starting monitoring and external interfaces failed", error = error
- quit(QuitFailure)
-
- nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
-
debug "Setting up shutdown hooks"
- ## Setup shutdown hooks for this process.
- ## Stop node gracefully on shutdown.
- proc asyncStopper(wakuApp: Waku) {.async: (raises: [Exception]).} =
- nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
- await wakuApp.stop()
+ proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
+ await waku.stop()
quit(QuitSuccess)
# Handle Ctrl-C SIGINT
@@ -191,7 +142,7 @@ when isMainModule:
# workaround for https://github.com/nim-lang/Nim/issues/4057
setupForeignThreadGc()
notice "Shutting down after receiving SIGINT"
- asyncSpawn asyncStopper(wakuApp)
+ asyncSpawn asyncStopper(waku)
setControlCHook(handleCtrlC)
@@ -199,7 +150,7 @@ when isMainModule:
when defined(posix):
proc handleSigterm(signal: cint) {.noconv.} =
notice "Shutting down after receiving SIGTERM"
- asyncSpawn asyncStopper(wakuApp)
+ asyncSpawn asyncStopper(waku)
c_signal(ansi_c.SIGTERM, handleSigterm)
@@ -212,22 +163,26 @@ when isMainModule:
# Not available in -d:release mode
writeStackTrace()
- waitFor wakuApp.stop()
+ waitFor waku.stop()
quit(QuitFailure)
c_signal(ansi_c.SIGSEGV, handleSigsegv)
info "Node setup complete"
- let codec = conf.getCodec()
+ var codec = WakuLightPushCodec
# mounting relevant client, for PX filter client must be mounted ahead
+ if conf.testFunc == TesterFunctionality.SENDER:
+ codec = WakuLightPushCodec
+ else:
+ codec = WakuFilterSubscribeCodec
var lookForServiceNode = false
var serviceNodePeerInfo: RemotePeerInfo
if conf.serviceNode.len == 0:
if conf.bootstrapNode.len > 0:
info "Bootstrapping with PeerExchange to gather random service node"
- let futForServiceNode = pxLookupServiceNode(wakuApp.node, conf)
+ let futForServiceNode = pxLookupServiceNode(waku.node, conf)
if not (waitFor futForServiceNode.withTimeout(20.minutes)):
error "Service node not found in time via PX"
quit(QuitFailure)
@@ -237,7 +192,7 @@ when isMainModule:
quit(QuitFailure)
serviceNodePeerInfo = selectRandomServicePeer(
- wakuApp.node.peerManager, none(RemotePeerInfo), codec
+ waku.node.peerManager, none(RemotePeerInfo), codec
).valueOr:
error "Service node selection failed"
quit(QuitFailure)
@@ -252,11 +207,11 @@ when isMainModule:
info "Service node to be used", serviceNode = $serviceNodePeerInfo
- logSelfPeers(wakuApp.node.peerManager)
+ logSelfPeers(waku.node.peerManager)
if conf.testFunc == TesterFunctionality.SENDER:
- setupAndPublish(wakuApp.node, conf, serviceNodePeerInfo)
+ setupAndPublish(waku.node, conf, serviceNodePeerInfo)
else:
- setupAndListen(wakuApp.node, conf, serviceNodePeerInfo)
+ setupAndListen(waku.node, conf, serviceNodePeerInfo)
runForever()
diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim
index a99cfcb52..5e6cbb700 100644
--- a/apps/wakunode2/wakunode2.nim
+++ b/apps/wakunode2/wakunode2.nim
@@ -16,7 +16,6 @@ import
factory/external_config,
factory/waku,
node/health_monitor,
- node/waku_metrics,
waku_api/rest/builder as rest_server_builder,
]
@@ -53,69 +52,21 @@ when isMainModule:
let conf = wakuNodeConf.toInspectRlnDbConf()
doInspectRlnDb(conf)
of noCommand:
- # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
- # It will always be called from main thread anyway.
- # Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety
- var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor
- nodeHealthMonitor = WakuNodeHealthMonitor()
- nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
-
let conf = wakuNodeConf.toWakuConf().valueOr:
error "Waku configuration failed", error = error
quit(QuitFailure)
- var restServer: WakuRestServerRef = nil
-
- if conf.restServerConf.isSome():
- restServer = rest_server_builder.startRestServerEssentials(
- nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift
- ).valueOr:
- error "Starting essential REST server failed.", error = $error
- quit(QuitFailure)
-
var waku = Waku.new(conf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
- waku.restServer = restServer
-
- nodeHealthMonitor.setNode(waku.node)
-
(waitFor startWaku(addr waku)).isOkOr:
error "Starting waku failed", error = error
quit(QuitFailure)
- if conf.restServerConf.isSome():
- rest_server_builder.startRestServerProtocolSupport(
- restServer,
- waku.node,
- waku.wakuDiscv5,
- conf.restServerConf.get(),
- conf.relay,
- conf.lightPush,
- conf.clusterId,
- conf.shards,
- conf.contentTopics,
- ).isOkOr:
- error "Starting protocols support REST server failed.", error = $error
- quit(QuitFailure)
-
- if conf.metricsServerConf.isSome():
- waku.metricsServer = waku_metrics.startMetricsServerAndLogging(
- conf.metricsServerConf.get(), conf.portsShift
- ).valueOr:
- error "Starting monitoring and external interfaces failed", error = error
- quit(QuitFailure)
-
- nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
-
debug "Setting up shutdown hooks"
- ## Setup shutdown hooks for this process.
- ## Stop node gracefully on shutdown.
-
- proc asyncStopper(node: Waku) {.async: (raises: [Exception]).} =
- nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
- await node.stop()
+ proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
+ await waku.stop()
quit(QuitSuccess)
# Handle Ctrl-C SIGINT
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 3774ad0a8..3e4431411 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -878,8 +878,8 @@ proc waku_is_online(
handleRequest(
ctx,
- RequestType.PEER_MANAGER,
- PeerManagementRequest.createShared(PeerManagementMsgType.IS_ONLINE),
+ RequestType.DEBUG,
+ DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE),
callback,
userData,
)
diff --git a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
index dc0bc72f5..4ab8914ee 100644
--- a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
@@ -7,13 +7,17 @@ import
strutils,
libp2p/peerid,
metrics
-import ../../../../waku/factory/waku, ../../../../waku/node/waku_node
+import
+ ../../../../waku/factory/waku,
+ ../../../../waku/node/waku_node,
+ ../../../../waku/node/health_monitor
type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
RETRIEVE_MY_ENR
RETRIEVE_MY_PEER_ID
RETRIEVE_METRICS
+ RETRIEVE_ONLINE_STATE
type DebugNodeRequest* = object
operation: DebugNodeMsgType
@@ -49,6 +53,8 @@ proc process*(
return ok($waku.node.peerId())
of RETRIEVE_METRICS:
return ok(getMetrics())
+ of RETRIEVE_ONLINE_STATE:
+ return ok($waku.healthMonitor.onlineMonitor.amIOnline())
error "unsupported operation in DebugNodeRequest"
return err("unsupported operation in DebugNodeRequest")
diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
index 8d504df89..0f912aaa3 100644
--- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
@@ -8,6 +8,7 @@ import
../../../../waku/factory/node_factory,
../../../../waku/factory/networks_config,
../../../../waku/factory/app_callbacks,
+ ../../../../waku/waku_api/rest/builder,
../../../alloc
type NodeLifecycleMsgType* = enum
@@ -73,9 +74,11 @@ proc createWaku(
appCallbacks.topicHealthChangeHandler = nil
# TODO: Convert `confJson` directly to `WakuConf`
- let wakuConf = conf.toWakuConf().valueOr:
+ var wakuConf = conf.toWakuConf().valueOr:
return err("Configuration error: " & $error)
+ wakuConf.restServerConf = none(RestServerConf) ## don't want REST in libwaku
+
let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
error "waku initialization failed", error = error
return err("Failed setting up Waku: " & $error)
diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
index 55728780f..deb520366 100644
--- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
@@ -16,7 +16,6 @@ type PeerManagementMsgType* {.pure.} = enum
DIAL_PEER
DIAL_PEER_BY_ID
GET_CONNECTED_PEERS
- IS_ONLINE
type PeerManagementRequest* = object
operation: PeerManagementMsgType
@@ -156,7 +155,5 @@ proc process*(
(inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
- of IS_ONLINE:
- return ok($waku.node.peerManager.isOnline())
return ok("")
diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim
index 93838b4fe..964e09c5b 100644
--- a/tests/wakunode_rest/test_rest_health.nim
+++ b/tests/wakunode_rest/test_rest_health.nim
@@ -39,7 +39,7 @@ suite "Waku v2 REST API - health":
asyncTest "Get node health info - GET /health":
# Given
let node = testWakuNode()
- let healthMonitor = WakuNodeHealthMonitor()
+ let healthMonitor = NodeHealthMonitor()
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
@@ -78,7 +78,7 @@ suite "Waku v2 REST API - health":
node.mountLightPushClient()
await node.mountFilterClient()
- healthMonitor.setNode(node)
+ healthMonitor.setNodeToHealthMonitor(node)
healthMonitor.setOverallHealth(HealthStatus.READY)
# When
response = await client.healthCheck()
diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim
index 18f1535ea..b05d5d054 100644
--- a/waku/factory/builder.nim
+++ b/waku/factory/builder.nim
@@ -209,7 +209,6 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
maxServicePeers = some(builder.maxServicePeers),
colocationLimit = builder.colocationLimit,
shardedPeerManagement = builder.shardAware,
- dnsNameServers = netConfig.dnsNameServers,
)
var node: WakuNode
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 7063f8476..2c363c6c4 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -426,7 +426,7 @@ proc startNode*(
## Connect to static nodes and start
## keep-alive, if configured.
- # Start Waku v2 node
+ info "Running nwaku node", version = git_version
try:
await node.start()
except CatchableError:
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index 006093648..2602120d8 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -26,9 +26,11 @@ import
../waku_node,
../node/peer_manager,
../node/health_monitor,
+ ../node/waku_metrics,
../node/delivery_monitor/delivery_monitor,
../waku_api/message_cache,
../waku_api/rest/server,
+ ../waku_api/rest/builder as rest_server_builder,
../waku_archive,
../waku_relay/protocol,
../discovery/waku_dnsdisc,
@@ -66,6 +68,8 @@ type Waku* = ref object
node*: WakuNode
+ healthMonitor*: NodeHealthMonitor
+
deliveryMonitor: DeliveryMonitor
restServer*: WakuRestServerRef
@@ -159,19 +163,33 @@ proc new*(
logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
?wakuConf.validate()
-
wakuConf.logConf()
- info "Running nwaku node", version = git_version
+ let healthMonitor = NodeHealthMonitor.new(wakuConf.dnsAddrsNameServers)
+
+ let restServer: WakuRestServerRef =
+ if wakuConf.restServerConf.isSome():
+ let restServer = startRestServerEssentials(
+ healthMonitor, wakuConf.restServerConf.get(), wakuConf.portsShift
+ ).valueOr:
+ error "Starting essential REST server failed", error = $error
+ return err("Failed to start essential REST server in Waku.new: " & $error)
+
+ restServer
+ else:
+ nil
var relay = newCircuitRelay(wakuConf.circuitRelayClient)
- let nodeRes = setupNode(wakuConf, rng, relay)
- if nodeRes.isErr():
- error "Failed setting up node", error = nodeRes.error
- return err("Failed setting up node: " & nodeRes.error)
+ let node = setupNode(wakuConf, rng, relay).valueOr:
+ error "Failed setting up node", error = $error
+ return err("Failed setting up node: " & $error)
- let node = nodeRes.get()
+ healthMonitor.setNodeToHealthMonitor(node)
+ healthMonitor.onlineMonitor.setPeerStoreToOnlineMonitor(node.switch.peerStore)
+ healthMonitor.onlineMonitor.addOnlineStateObserver(
+ node.peerManager.getOnlineStateObserver()
+ )
node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr:
error "Failed setting up app callbacks", error = error
@@ -197,8 +215,10 @@ proc new*(
rng: rng,
key: wakuConf.nodeKey,
node: node,
+ healthMonitor: healthMonitor,
deliveryMonitor: deliveryMonitor,
appCallbacks: appCallbacks,
+ restServer: restServer,
)
waku.setupSwitchServices(wakuConf, relay, rng)
@@ -334,15 +354,6 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()
return
-# The network connectivity loop checks periodically whether the node is online or not
-# and triggers any change that depends on the network connectivity state
-proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} =
- while true:
- await sleepAsync(15.seconds)
-
- # Update online state
- await waku.node.peerManager.updateOnlineState()
-
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
debug "Retrieve dynamic bootstrap nodes"
let conf = waku[].conf
@@ -369,7 +380,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err("Error in updateApp: " & $error)
## Discv5
- if conf.discv5Conf.isSome:
+ if conf.discv5Conf.isSome():
waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5(
waku.node.enr,
waku.node.peerManager,
@@ -389,23 +400,41 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
if not waku[].deliveryMonitor.isNil():
waku[].deliveryMonitor.startDeliveryMonitor()
- # Start network connectivity check loop
- waku[].networkConnLoopHandle = waku[].startNetworkConnectivityLoop()
+ ## Health Monitor
+ waku[].healthMonitor.startHealthMonitor()
+
+ if conf.restServerConf.isSome():
+ rest_server_builder.startRestServerProtocolSupport(
+ waku[].restServer,
+ waku[].node,
+ waku[].wakuDiscv5,
+ conf.restServerConf.get(),
+ conf.relay,
+ conf.lightPush,
+ conf.clusterId,
+ conf.shards,
+ conf.contentTopics,
+ ).isOkOr:
+ return err("Starting protocols support REST server failed: " & $error)
+
+ if conf.metricsServerConf.isSome():
+ waku[].metricsServer = waku_metrics.startMetricsServerAndLogging(
+ conf.metricsServerConf.get(), conf.portsShift
+ ).valueOr:
+ return err("Starting monitoring and external interfaces failed: " & error)
+
+ waku[].healthMonitor.setOverallHealth(HealthStatus.READY)
return ok()
-# Waku shutdown
-
proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} =
- if not waku.restServer.isNil():
- await waku.restServer.stop()
+ ## Waku shutdown
+
+ waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
if not waku.metricsServer.isNil():
await waku.metricsServer.stop()
- if not waku.networkConnLoopHandle.isNil():
- await waku.networkConnLoopHandle.cancelAndWait()
-
if not waku.wakuDiscv5.isNil():
await waku.wakuDiscv5.stop()
@@ -414,3 +443,9 @@ proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} =
if not waku.dnsRetryLoopHandle.isNil():
await waku.dnsRetryLoopHandle.cancelAndWait()
+
+ if not waku.healthMonitor.isNil():
+ await waku.healthMonitor.stopHealthMonitor()
+
+ if not waku.restServer.isNil():
+ await waku.restServer.stop()
diff --git a/waku/node/health_monitor.nim b/waku/node/health_monitor.nim
index aa9082ec6..854a8bbc0 100644
--- a/waku/node/health_monitor.nim
+++ b/waku/node/health_monitor.nim
@@ -1,293 +1,4 @@
-{.push raises: [].}
+import
+ health_monitor/[node_health_monitor, protocol_health, online_monitor, health_status]
-import std/[options, sets], chronos, libp2p/protocols/rendezvous
-
-import waku_node, ../waku_rln_relay, ../waku_relay, ./peer_manager
-
-type
- HealthStatus* = enum
- INITIALIZING
- SYNCHRONIZING
- READY
- NOT_READY
- NOT_MOUNTED
- SHUTTING_DOWN
-
- ProtocolHealth* = object
- protocol*: string
- health*: HealthStatus
- desc*: Option[string] ## describes why a certain protocol is considered `NOT_READY`
-
- HealthReport* = object
- nodeHealth*: HealthStatus
- protocolsHealth*: seq[ProtocolHealth]
-
- WakuNodeHealthMonitor* = ref object
- nodeHealth: HealthStatus
- node: Option[WakuNode]
-
-proc `$`*(t: HealthStatus): string =
- result =
- case t
- of INITIALIZING: "Initializing"
- of SYNCHRONIZING: "Synchronizing"
- of READY: "Ready"
- of NOT_READY: "Not Ready"
- of NOT_MOUNTED: "Not Mounted"
- of SHUTTING_DOWN: "Shutting Down"
-
-proc init*(
- t: typedesc[HealthStatus], strRep: string
-): HealthStatus {.raises: [ValueError].} =
- case strRep
- of "Initializing":
- return HealthStatus.INITIALIZING
- of "Synchronizing":
- return HealthStatus.SYNCHRONIZING
- of "Ready":
- return HealthStatus.READY
- of "Not Ready":
- return HealthStatus.NOT_READY
- of "Not Mounted":
- return HealthStatus.NOT_MOUNTED
- of "Shutting Down":
- return HealthStatus.SHUTTING_DOWN
- else:
- raise newException(ValueError, "Invalid HealthStatus string representation")
-
-proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth =
- let p = ProtocolHealth(
- protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]()
- )
- return p
-
-proc notReady(p: var ProtocolHealth, desc: string): ProtocolHealth =
- p.health = HealthStatus.NOT_READY
- p.desc = some(desc)
- return p
-
-proc ready(p: var ProtocolHealth): ProtocolHealth =
- p.health = HealthStatus.READY
- p.desc = none[string]()
- return p
-
-proc notMounted(p: var ProtocolHealth): ProtocolHealth =
- p.health = HealthStatus.NOT_MOUNTED
- p.desc = none[string]()
- return p
-
-proc synchronizing(p: var ProtocolHealth): ProtocolHealth =
- p.health = HealthStatus.SYNCHRONIZING
- p.desc = none[string]()
- return p
-
-proc initializing(p: var ProtocolHealth): ProtocolHealth =
- p.health = HealthStatus.INITIALIZING
- p.desc = none[string]()
- return p
-
-proc shuttingDown(p: var ProtocolHealth): ProtocolHealth =
- p.health = HealthStatus.SHUTTING_DOWN
- p.desc = none[string]()
- return p
-
-const FutIsReadyTimout = 5.seconds
-
-proc getRelayHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Relay")
- if hm.node.get().wakuRelay == nil:
- return p.notMounted()
-
- let relayPeers = hm.node
- .get().wakuRelay
- .getConnectedPubSubPeers(pubsubTopic = "").valueOr:
- return p.notMounted()
-
- if relayPeers.len() == 0:
- return p.notReady("No connected peers")
-
- return p.ready()
-
-proc getRlnRelayHealth(hm: WakuNodeHealthMonitor): Future[ProtocolHealth] {.async.} =
- var p = ProtocolHealth.init("Rln Relay")
- if hm.node.get().wakuRlnRelay.isNil():
- return p.notMounted()
-
- let isReadyStateFut = hm.node.get().wakuRlnRelay.isReady()
- if not await isReadyStateFut.withTimeout(FutIsReadyTimout):
- return p.notReady("Ready state check timed out")
-
- try:
- if not isReadyStateFut.completed():
- return p.notReady("Ready state check timed out")
- elif isReadyStateFut.read():
- return p.ready()
-
- return p.synchronizing()
- except:
- error "exception reading state: " & getCurrentExceptionMsg()
- return p.notReady("State cannot be determined")
-
-proc getLightpushHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Lightpush")
- if hm.node.get().wakuLightPush == nil:
- return p.notMounted()
-
- if relayHealth == HealthStatus.READY:
- return p.ready()
-
- return p.notReady("Node has no relay peers to fullfill push requests")
-
-proc getLightpushClientHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Lightpush Client")
- if hm.node.get().wakuLightpushClient == nil:
- return p.notMounted()
-
- let selfServiceAvailable =
- hm.node.get().wakuLightPush != nil and relayHealth == HealthStatus.READY
- let servicePeerAvailable =
- hm.node.get().peerManager.selectPeer(WakuLightPushCodec).isSome()
-
- if selfServiceAvailable or servicePeerAvailable:
- return p.ready()
-
- return p.notReady("No Lightpush service peer available yet")
-
-proc getLegacyLightpushHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Legacy Lightpush")
- if hm.node.get().wakuLegacyLightPush == nil:
- return p.notMounted()
-
- if relayHealth == HealthStatus.READY:
- return p.ready()
-
- return p.notReady("Node has no relay peers to fullfill push requests")
-
-proc getLegacyLightpushClientHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Legacy Lightpush Client")
- if hm.node.get().wakuLegacyLightpushClient == nil:
- return p.notMounted()
-
- if (hm.node.get().wakuLegacyLightPush != nil and relayHealth == HealthStatus.READY) or
- hm.node.get().peerManager.selectPeer(WakuLegacyLightPushCodec).isSome():
- return p.ready()
-
- return p.notReady("No Lightpush service peer available yet")
-
-proc getFilterHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Filter")
- if hm.node.get().wakuFilter == nil:
- return p.notMounted()
-
- if relayHealth == HealthStatus.READY:
- return p.ready()
-
- return p.notReady("Relay is not ready, filter will not be able to sort out messages")
-
-proc getFilterClientHealth(
- hm: WakuNodeHealthMonitor, relayHealth: HealthStatus
-): ProtocolHealth =
- var p = ProtocolHealth.init("Filter Client")
- if hm.node.get().wakuFilterClient == nil:
- return p.notMounted()
-
- if hm.node.get().peerManager.selectPeer(WakuFilterSubscribeCodec).isSome():
- return p.ready()
-
- return p.notReady("No Filter service peer available yet")
-
-proc getStoreHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Store")
- if hm.node.get().wakuStore == nil:
- return p.notMounted()
-
- return p.ready()
-
-proc getStoreClientHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Store Client")
- if hm.node.get().wakuStoreClient == nil:
- return p.notMounted()
-
- if hm.node.get().peerManager.selectPeer(WakuStoreCodec).isSome() or
- hm.node.get().wakuStore != nil:
- return p.ready()
-
- return p.notReady(
- "No Store service peer available yet, neither Store service set up for the node"
- )
-
-proc getLegacyStoreHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Legacy Store")
- if hm.node.get().wakuLegacyStore == nil:
- return p.notMounted()
-
- return p.ready()
-
-proc getLegacyStoreClientHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Legacy Store Client")
- if hm.node.get().wakuLegacyStoreClient == nil:
- return p.notMounted()
-
- if hm.node.get().peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or
- hm.node.get().wakuLegacyStore != nil:
- return p.ready()
-
- return p.notReady(
- "No Legacy Store service peers are available yet, neither Store service set up for the node"
- )
-
-proc getPeerExchangeHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Peer Exchange")
- if hm.node.get().wakuPeerExchange == nil:
- return p.notMounted()
-
- return p.ready()
-
-proc getRendezvousHealth(hm: WakuNodeHealthMonitor): ProtocolHealth =
- var p = ProtocolHealth.init("Rendezvous")
- if hm.node.get().wakuRendezvous == nil:
- return p.notMounted()
-
- if hm.node.get().peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0:
- return p.notReady("No Rendezvous peers are available yet")
-
- return p.ready()
-
-proc getNodeHealthReport*(hm: WakuNodeHealthMonitor): Future[HealthReport] {.async.} =
- var report: HealthReport
- report.nodeHealth = hm.nodeHealth
-
- if hm.node.isSome():
- let relayHealth = hm.getRelayHealth()
- report.protocolsHealth.add(relayHealth)
- report.protocolsHealth.add(await hm.getRlnRelayHealth())
- report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health))
- report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health))
- report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health))
- report.protocolsHealth.add(hm.getStoreHealth())
- report.protocolsHealth.add(hm.getLegacyStoreHealth())
- report.protocolsHealth.add(hm.getPeerExchangeHealth())
- report.protocolsHealth.add(hm.getRendezvousHealth())
-
- report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health))
- report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health))
- report.protocolsHealth.add(hm.getStoreClientHealth())
- report.protocolsHealth.add(hm.getLegacyStoreClientHealth())
- report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health))
- return report
-
-proc setNode*(hm: WakuNodeHealthMonitor, node: WakuNode) =
- hm.node = some(node)
-
-proc setOverallHealth*(hm: WakuNodeHealthMonitor, health: HealthStatus) =
- hm.nodeHealth = health
+export node_health_monitor, protocol_health, online_monitor, health_status
diff --git a/waku/node/health_monitor/health_status.nim b/waku/node/health_monitor/health_status.nim
new file mode 100644
index 000000000..4dd2bdd9a
--- /dev/null
+++ b/waku/node/health_monitor/health_status.nim
@@ -0,0 +1,16 @@
+import results, std/strutils
+
+type HealthStatus* {.pure.} = enum
+ INITIALIZING
+ SYNCHRONIZING
+ READY
+ NOT_READY
+ NOT_MOUNTED
+ SHUTTING_DOWN
+
+proc init*(t: typedesc[HealthStatus], strRep: string): Result[HealthStatus, string] =
+ try:
+ let status = parseEnum[HealthStatus](strRep)
+ return ok(status)
+ except ValueError:
+ return err("Invalid HealthStatus string representation: " & strRep)
diff --git a/waku/node/health_monitor/node_health_monitor.nim b/waku/node/health_monitor/node_health_monitor.nim
new file mode 100644
index 000000000..b13925d66
--- /dev/null
+++ b/waku/node/health_monitor/node_health_monitor.nim
@@ -0,0 +1,270 @@
+{.push raises: [].}
+
+import std/[options, sets, strformat], chronos, chronicles, libp2p/protocols/rendezvous
+
+import
+ ../waku_node,
+ ../../waku_rln_relay,
+ ../../waku_relay,
+ ../peer_manager,
+ ./online_monitor,
+ ./health_status,
+ ./protocol_health
+
+## This module checks the health state of the local ("self") Waku node
+
+type
+ HealthReport* = object
+ nodeHealth*: HealthStatus
+ protocolsHealth*: seq[ProtocolHealth]
+
+ NodeHealthMonitor* = ref object
+ nodeHealth: HealthStatus
+ node: WakuNode
+ onlineMonitor*: OnlineMonitor
+
+template checkWakuNodeNotNil(node: WakuNode, p: ProtocolHealth): untyped =
+ if node.isNil():
+ warn "WakuNode is not set, cannot check health", protocol_health_instance = $p
+ return p.notMounted()
+
+proc getRelayHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Relay")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuRelay == nil:
+ return p.notMounted()
+
+ let relayPeers = hm.node.wakuRelay.getConnectedPubSubPeers(pubsubTopic = "").valueOr:
+ return p.notMounted()
+
+ if relayPeers.len() == 0:
+ return p.notReady("No connected peers")
+
+ return p.ready()
+
+proc getRlnRelayHealth(hm: NodeHealthMonitor): Future[ProtocolHealth] {.async.} =
+ var p = ProtocolHealth.init("Rln Relay")
+ if hm.node.isNil():
+ warn "WakuNode is not set, cannot check health", protocol_health_instance = $p
+ return p.notMounted()
+
+ if hm.node.wakuRlnRelay.isNil():
+ return p.notMounted()
+
+ const FutIsReadyTimeout = 5.seconds
+
+ let isReadyStateFut = hm.node.wakuRlnRelay.isReady()
+ if not await isReadyStateFut.withTimeout(FutIsReadyTimeout):
+ return p.notReady("Ready state check timed out")
+
+ try:
+ if not isReadyStateFut.completed():
+ return p.notReady("Ready state check timed out")
+ elif isReadyStateFut.read():
+ return p.ready()
+
+ return p.synchronizing()
+ except:
+ error "exception reading state: " & getCurrentExceptionMsg()
+ return p.notReady("State cannot be determined")
+
+proc getLightpushHealth(
+ hm: NodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Lightpush")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLightPush == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Node has no relay peers to fullfill push requests")
+
+proc getLightpushClientHealth(
+ hm: NodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Lightpush Client")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLightpushClient == nil:
+ return p.notMounted()
+
+ let selfServiceAvailable =
+ hm.node.wakuLightPush != nil and relayHealth == HealthStatus.READY
+ let servicePeerAvailable = hm.node.peerManager.selectPeer(WakuLightPushCodec).isSome()
+
+ if selfServiceAvailable or servicePeerAvailable:
+ return p.ready()
+
+ return p.notReady("No Lightpush service peer available yet")
+
+proc getLegacyLightpushHealth(
+ hm: NodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Lightpush")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLegacyLightPush == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Node has no relay peers to fullfill push requests")
+
+proc getLegacyLightpushClientHealth(
+ hm: NodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Lightpush Client")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLegacyLightpushClient == nil:
+ return p.notMounted()
+
+ if (hm.node.wakuLegacyLightPush != nil and relayHealth == HealthStatus.READY) or
+ hm.node.peerManager.selectPeer(WakuLegacyLightPushCodec).isSome():
+ return p.ready()
+
+ return p.notReady("No Lightpush service peer available yet")
+
+proc getFilterHealth(hm: NodeHealthMonitor, relayHealth: HealthStatus): ProtocolHealth =
+ var p = ProtocolHealth.init("Filter")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuFilter == nil:
+ return p.notMounted()
+
+ if relayHealth == HealthStatus.READY:
+ return p.ready()
+
+ return p.notReady("Relay is not ready, filter will not be able to sort out messages")
+
+proc getFilterClientHealth(
+ hm: NodeHealthMonitor, relayHealth: HealthStatus
+): ProtocolHealth =
+ var p = ProtocolHealth.init("Filter Client")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuFilterClient == nil:
+ return p.notMounted()
+
+ if hm.node.peerManager.selectPeer(WakuFilterSubscribeCodec).isSome():
+ return p.ready()
+
+ return p.notReady("No Filter service peer available yet")
+
+proc getStoreHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Store")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuStore == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Store Client")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuStoreClient == nil:
+ return p.notMounted()
+
+ if hm.node.peerManager.selectPeer(WakuStoreCodec).isSome() or hm.node.wakuStore != nil:
+ return p.ready()
+
+ return p.notReady(
+ "No Store service peer available yet, nor is a Store service set up on this node"
+ )
+
+proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Store")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLegacyStore == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Legacy Store Client")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuLegacyStoreClient == nil:
+ return p.notMounted()
+
+ if hm.node.peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or
+ hm.node.wakuLegacyStore != nil:
+ return p.ready()
+
+ return p.notReady(
+ "No Legacy Store service peer available yet, nor is a Legacy Store service set up on this node"
+ )
+
+proc getPeerExchangeHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Peer Exchange")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuPeerExchange == nil:
+ return p.notMounted()
+
+ return p.ready()
+
+proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth =
+ var p = ProtocolHealth.init("Rendezvous")
+ checkWakuNodeNotNil(hm.node, p)
+
+ if hm.node.wakuRendezvous == nil:
+ return p.notMounted()
+
+ if hm.node.peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0:
+ return p.notReady("No Rendezvous peers are available yet")
+
+ return p.ready()
+
+proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} =
+ var report: HealthReport
+ report.nodeHealth = hm.nodeHealth
+
+ if not hm.node.isNil():
+ let relayHealth = hm.getRelayHealth()
+ report.protocolsHealth.add(relayHealth)
+ report.protocolsHealth.add(await hm.getRlnRelayHealth())
+ report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getStoreHealth())
+ report.protocolsHealth.add(hm.getLegacyStoreHealth())
+ report.protocolsHealth.add(hm.getPeerExchangeHealth())
+ report.protocolsHealth.add(hm.getRendezvousHealth())
+
+ report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health))
+ report.protocolsHealth.add(hm.getStoreClientHealth())
+ report.protocolsHealth.add(hm.getLegacyStoreClientHealth())
+ report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health))
+ return report
+
+proc setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) =
+ hm.node = node
+
+proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) =
+ hm.nodeHealth = health
+
+proc startHealthMonitor*(hm: NodeHealthMonitor) =
+ hm.onlineMonitor.startOnlineMonitor()
+
+proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} =
+ await hm.onlineMonitor.stopOnlineMonitor()
+
+proc new*(
+ T: type NodeHealthMonitor,
+ dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
+): T =
+ T(
+ nodeHealth: INITIALIZING,
+ node: nil,
+ onlineMonitor: OnlineMonitor.init(dnsNameServers),
+ )
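A minimal wiring sketch for the new type (not part of the patch), mirroring the calls waku.nim makes in the earlier hunks; `myNode` stands in for an already-mounted, running WakuNode and the snippet assumes an async context:

let healthMonitor = NodeHealthMonitor.new()   # defaults to Cloudflare DNS for the online check
healthMonitor.setNodeToHealthMonitor(myNode)
healthMonitor.onlineMonitor.setPeerStoreToOnlineMonitor(myNode.switch.peerStore)
healthMonitor.onlineMonitor.addOnlineStateObserver(
  myNode.peerManager.getOnlineStateObserver()
)
healthMonitor.startHealthMonitor()            # spawns the periodic connectivity loop

let report = await healthMonitor.getNodeHealthReport()
echo "node: ", report.nodeHealth
for ph in report.protocolsHealth:
  echo $ph    # e.g. "protocol: Relay, health: NOT_READY, description: ..."

healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
await healthMonitor.stopHealthMonitor()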
diff --git a/waku/node/health_monitor/online_monitor.nim b/waku/node/health_monitor/online_monitor.nim
new file mode 100644
index 000000000..f3a3013e2
--- /dev/null
+++ b/waku/node/health_monitor/online_monitor.nim
@@ -0,0 +1,77 @@
+import std/sequtils
+import chronos, chronicles, libp2p/nameresolving/dnsresolver, libp2p/peerstore
+
+import ../peer_manager/waku_peer_store, waku/waku_core/peers
+
+type
+ OnOnlineStateChange* = proc(online: bool) {.gcsafe, raises: [].}
+
+ OnlineMonitor* = ref object
+ onOnlineStateChange: OnOnlineStateChange
+ dnsNameServers*: seq[IpAddress]
+ onlineStateObservers: seq[OnOnlineStateChange]
+ networkConnLoopHandle: Future[void]
+ peerStore: PeerStore
+ online: bool
+
+proc checkInternetConnectivity(
+ nameServerIps: seq[IpAddress], timeout = 2.seconds
+): Future[bool] {.async.} =
+ const DNSCheckDomain = "one.one.one.one"
+ let nameServers = nameServerIps.mapIt(initTAddress(it, Port(53)))
+ let dnsResolver = DnsResolver.new(nameServers)
+
+ # Resolve domain IP
+ let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC)
+ if resolved.len > 0:
+ return true
+ else:
+ return false
+
+proc updateOnlineState(self: OnlineMonitor) {.async.} =
+ if self.onlineStateObservers.len == 0:
+ trace "No online state observers registered, cannot notify about online state change"
+ return
+
+ let numConnectedPeers =
+ if self.peerStore.isNil():
+ 0
+ else:
+ self.peerStore.peers().countIt(it.connectedness == Connected)
+
+ self.online =
+ if numConnectedPeers > 0:
+ true
+ else:
+ await checkInternetConnectivity(self.dnsNameServers)
+
+ for onlineStateObserver in self.onlineStateObservers:
+ onlineStateObserver(self.online)
+
+proc networkConnectivityLoop(self: OnlineMonitor): Future[void] {.async.} =
+ ## Periodically checks whether the node is online and notifies the
+ ## registered observers of any change in connectivity state
+ while true:
+ await self.updateOnlineState()
+ await sleepAsync(15.seconds)
+
+proc startOnlineMonitor*(self: OnlineMonitor) =
+ self.networkConnLoopHandle = self.networkConnectivityLoop()
+
+proc stopOnlineMonitor*(self: OnlineMonitor) {.async.} =
+ if not self.networkConnLoopHandle.isNil():
+ await self.networkConnLoopHandle.cancelAndWait()
+
+proc setPeerStoreToOnlineMonitor*(self: OnlineMonitor, peerStore: PeerStore) =
+ self.peerStore = peerStore
+
+proc addOnlineStateObserver*(self: OnlineMonitor, observer: OnOnlineStateChange) =
+ ## Adds an observer that will be called when the online state changes
+ if observer notin self.onlineStateObservers:
+ self.onlineStateObservers.add(observer)
+
+proc amIOnline*(self: OnlineMonitor): bool =
+ return self.online
+
+proc init*(T: type OnlineMonitor, dnsNameServers: seq[IpAddress]): OnlineMonitor =
+ T(dnsNameServers: dnsNameServers, onlineStateObservers: @[])
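A self-contained sketch of the observer flow this module introduces (illustrative only; the 1.1.1.1/1.0.0.1 resolvers simply mirror the defaults used elsewhere in the patch):

import std/net
import chronos
import waku/node/health_monitor/online_monitor

proc demo() {.async.} =
  let monitor = OnlineMonitor.init(
    @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
  )
  # observers are notified on every check; PeerManager registers one the same way
  monitor.addOnlineStateObserver(
    proc(online: bool) {.gcsafe, raises: [].} =
      echo "online state: ", online
  )
  monitor.startOnlineMonitor()   # first check runs immediately, then every 15 s
  await sleepAsync(20.seconds)
  echo "amIOnline: ", monitor.amIOnline()
  await monitor.stopOnlineMonitor()

waitFor demo()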
diff --git a/waku/node/health_monitor/protocol_health.nim b/waku/node/health_monitor/protocol_health.nim
new file mode 100644
index 000000000..7bacea94b
--- /dev/null
+++ b/waku/node/health_monitor/protocol_health.nim
@@ -0,0 +1,46 @@
+import std/[options, strformat]
+import ./health_status
+
+type ProtocolHealth* = object
+ protocol*: string
+ health*: HealthStatus
+ desc*: Option[string] ## describes why a certain protocol is considered `NOT_READY`
+
+proc notReady*(p: var ProtocolHealth, desc: string): ProtocolHealth =
+ p.health = HealthStatus.NOT_READY
+ p.desc = some(desc)
+ return p
+
+proc ready*(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.READY
+ p.desc = none[string]()
+ return p
+
+proc notMounted*(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.NOT_MOUNTED
+ p.desc = none[string]()
+ return p
+
+proc synchronizing*(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.SYNCHRONIZING
+ p.desc = none[string]()
+ return p
+
+proc initializing*(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.INITIALIZING
+ p.desc = none[string]()
+ return p
+
+proc shuttingDown*(p: var ProtocolHealth): ProtocolHealth =
+ p.health = HealthStatus.SHUTTING_DOWN
+ p.desc = none[string]()
+ return p
+
+proc `$`*(p: ProtocolHealth): string =
+ return fmt"protocol: {p.protocol}, health: {p.health}, description: {p.desc}"
+
+proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth =
+ let p = ProtocolHealth(
+ protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]()
+ )
+ return p
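A tiny illustration of how these helpers are meant to be chained (a sketch, not part of the patch):

import std/options
import waku/node/health_monitor/[protocol_health, health_status]

var relay = ProtocolHealth.init("Relay")          # every protocol starts as NOT_MOUNTED
doAssert relay.health == HealthStatus.NOT_MOUNTED

# the helpers mutate the var and return it, so they can be used as `return p.notReady(...)`
discard relay.notReady("No connected peers")
doAssert relay.health == HealthStatus.NOT_READY
doAssert relay.desc == some("No connected peers")

echo $relay.ready()                               # desc is cleared again once READY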
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 707738e5f..0a19d5b2c 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -8,7 +8,6 @@ import
libp2p/multistream,
libp2p/muxers/muxer,
libp2p/nameresolving/nameresolver,
- libp2p/nameresolving/dnsresolver,
libp2p/peerstore
import
@@ -21,6 +20,7 @@ import
../../waku_enr/sharding,
../../waku_enr/capabilities,
../../waku_metadata,
+ ../health_monitor/online_monitor,
./peer_store/peer_storage,
./waku_peer_store
@@ -74,8 +74,6 @@ const
# Max peers that we allow from the same IP
DefaultColocationLimit* = 5
- DNSCheckDomain = "one.one.one.one"
-
type ConnectionChangeHandler* = proc(
peerId: PeerId, peerEvent: PeerEventKind
): Future[void] {.gcsafe, raises: [Defect].}
@@ -98,16 +96,12 @@ type PeerManager* = ref object of RootObj
started: bool
shardedPeerManagement: bool # temp feature flag
onConnectionChange*: ConnectionChangeHandler
- dnsNameServers*: seq[IpAddress]
- online: bool
+ online: bool ## state managed by online_monitor module
#~~~~~~~~~~~~~~~~~~~#
# Helper Functions #
#~~~~~~~~~~~~~~~~~~~#
-template isOnline*(self: PeerManager): bool =
- self.online
-
proc calculateBackoff(
initialBackoffInSec: int, backoffFactor: int, failedAttempts: int
): timer.Duration =
@@ -543,35 +537,9 @@ proc getStreamByPeerIdAndProtocol*(
return ok(streamRes.get())
-proc checkInternetConnectivity(
- nameServerIps: seq[IpAddress], timeout = 2.seconds
-): Future[bool] {.async.} =
- var nameServers: seq[TransportAddress]
- for ip in nameServerIps:
- nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
-
- let dnsResolver = DnsResolver.new(nameServers)
-
- # Resolve domain IP
- let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC)
-
- if resolved.len > 0:
- return true
- else:
- return false
-
-proc updateOnlineState*(pm: PeerManager) {.async.} =
- let numConnectedPeers =
- pm.switch.peerStore.peers().countIt(it.connectedness == Connected)
-
- if numConnectedPeers > 0:
- pm.online = true
- else:
- pm.online = await checkInternetConnectivity(pm.dnsNameServers)
-
proc connectToRelayPeers*(pm: PeerManager) {.async.} =
# only attempt if current node is online
- if not pm.isOnline():
+ if not pm.online:
error "connectToRelayPeers: won't attempt new connections - node is offline"
return
@@ -739,6 +707,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
asyncSpawn(pm.switch.disconnect(peerId))
peerStore.delete(peerId)
+
if not pm.onConnectionChange.isNil():
# we don't want to await for the callback to finish
asyncSpawn pm.onConnectionChange(peerId, Joined)
@@ -753,6 +722,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
if pm.ipTable[ip].len == 0:
pm.ipTable.del(ip)
break
+
if not pm.onConnectionChange.isNil():
# we don't want to await for the callback to finish
asyncSpawn pm.onConnectionChange(peerId, Left)
@@ -809,6 +779,10 @@ proc logAndMetrics(pm: PeerManager) {.async.} =
protoStreamsOut.float64, labelValues = [$Direction.Out, proto]
)
+proc getOnlineStateObserver*(pm: PeerManager): OnOnlineStateChange =
+ return proc(online: bool) {.gcsafe, raises: [].} =
+ pm.online = online
+
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Pruning and Maintenance (Stale Peers Management) #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
@@ -817,7 +791,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
if pm.wakuMetadata.shards.len == 0:
return
- if not pm.isOnline():
+ if not pm.online:
error "manageRelayPeers: won't attempt new connections - node is offline"
return
@@ -1048,7 +1022,6 @@ proc new*(
maxFailedAttempts = MaxFailedAttempts,
colocationLimit = DefaultColocationLimit,
shardedPeerManagement = false,
- dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
): PeerManager {.gcsafe.} =
let capacity = switch.peerStore.capacity
let maxConnections = switch.connManager.inSema.size
@@ -1099,7 +1072,6 @@ proc new*(
maxFailedAttempts: maxFailedAttempts,
colocationLimit: colocationLimit,
shardedPeerManagement: shardedPeerManagement,
- dnsNameServers: dnsNameServers,
online: true,
)
diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim
index 1b5d9af70..6725aaeec 100644
--- a/waku/waku_api/rest/builder.nim
+++ b/waku/waku_api/rest/builder.nim
@@ -40,7 +40,7 @@ type RestServerConf* = object
relayCacheCapacity*: uint32
proc startRestServerEssentials*(
- nodeHealthMonitor: WakuNodeHealthMonitor, conf: RestServerConf, portsShift: uint16
+ nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16
): Result[WakuRestServerRef, string] =
let requestErrorHandler: RestRequestErrorHandler = proc(
error: RestRequestError, request: HttpRequestRef
diff --git a/waku/waku_api/rest/health/handlers.nim b/waku/waku_api/rest/health/handlers.nim
index 48dad9276..aa6b1e925 100644
--- a/waku/waku_api/rest/health/handlers.nim
+++ b/waku/waku_api/rest/health/handlers.nim
@@ -11,7 +11,7 @@ const ROUTE_HEALTH* = "/health"
const FutHealthReportTimeout = 5.seconds
proc installHealthApiHandler*(
- router: var RestRouter, nodeHealthMonitor: WakuNodeHealthMonitor
+ router: var RestRouter, nodeHealthMonitor: NodeHealthMonitor
) =
router.api(MethodGet, ROUTE_HEALTH) do() -> RestApiResponse:
let healthReportFut = nodeHealthMonitor.getNodeHealthReport()
diff --git a/waku/waku_api/rest/health/types.nim b/waku/waku_api/rest/health/types.nim
index e457ebea5..57f8b284c 100644
--- a/waku/waku_api/rest/health/types.nim
+++ b/waku/waku_api/rest/health/types.nim
@@ -1,5 +1,6 @@
{.push raises: [].}
+import results
import chronicles, json_serialization, json_serialization/std/options
import ../../../waku_node, ../serdes
@@ -31,13 +32,10 @@ proc readValue*(
)
let fieldValue = reader.readValue(string)
- try:
- health = some(HealthStatus.init(fieldValue))
- protocol = some(fieldName)
- except ValueError:
- reader.raiseUnexpectedValue(
- "Invalid `health` value: " & getCurrentExceptionMsg()
- )
+ let h = HealthStatus.init(fieldValue).valueOr:
+ reader.raiseUnexpectedValue("Invalid `health` value: " & $error)
+ health = some(h)
+ protocol = some(fieldName)
value = ProtocolHealth(protocol: protocol.get(), health: health.get(), desc: desc)
@@ -63,10 +61,11 @@ proc readValue*(
reader.raiseUnexpectedField(
"Multiple `nodeHealth` fields found", "HealthReport"
)
- try:
- nodeHealth = some(HealthStatus.init(reader.readValue(string)))
- except ValueError:
- reader.raiseUnexpectedValue("Invalid `health` value")
+
+ let health = HealthStatus.init(reader.readValue(string)).valueOr:
+ reader.raiseUnexpectedValue("Invalid `health` value: " & $error)
+
+ nodeHealth = some(health)
of "protocolsHealth":
if protocolsHealth.isSome():
reader.raiseUnexpectedField(
From d01dd9959c865aa8232d9cf544fbec2698cca0cf Mon Sep 17 00:00:00 2001
From: Danish Arora <35004822+danisharora099@users.noreply.github.com>
Date: Tue, 17 Jun 2025 10:34:10 +0530
Subject: [PATCH 23/47] fix: typo from DIRVER to DRIVER (#3442)
---
waku/waku_archive/common.nim | 2 +-
waku/waku_archive_legacy/common.nim | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/waku/waku_archive/common.nim b/waku/waku_archive/common.nim
index 5e5b2a1a9..f26c73da9 100644
--- a/waku/waku_archive/common.nim
+++ b/waku/waku_archive/common.nim
@@ -44,7 +44,7 @@ type
proc `$`*(err: ArchiveError): string =
case err.kind
of ArchiveErrorKind.DRIVER_ERROR:
- "DIRVER_ERROR: " & err.cause
+ "DRIVER_ERROR: " & err.cause
of ArchiveErrorKind.INVALID_QUERY:
"INVALID_QUERY: " & err.cause
of ArchiveErrorKind.UNKNOWN:
diff --git a/waku/waku_archive_legacy/common.nim b/waku/waku_archive_legacy/common.nim
index e068e0f0c..ee45181cb 100644
--- a/waku/waku_archive_legacy/common.nim
+++ b/waku/waku_archive_legacy/common.nim
@@ -78,7 +78,7 @@ type
proc `$`*(err: ArchiveError): string =
case err.kind
of ArchiveErrorKind.DRIVER_ERROR:
- "DIRVER_ERROR: " & err.cause
+ "DRIVER_ERROR: " & err.cause
of ArchiveErrorKind.INVALID_QUERY:
"INVALID_QUERY: " & err.cause
of ArchiveErrorKind.UNKNOWN:
From d41179e562e97109b95eb0cabd7967bca78581a3 Mon Sep 17 00:00:00 2001
From: AYAHASSAN287 <49167455+AYAHASSAN287@users.noreply.github.com>
Date: Tue, 17 Jun 2025 17:37:25 +0300
Subject: [PATCH 24/47] test: Waku sync tests part2 (#3397)
* Revert "Revert "Add finger print tests""
This reverts commit 36066311f91da31ca69fef3fa327d5e7fda7e50c.
* Add state transition test
* Add last test for state transition
* Add new tests to transfer protocol
* Add stress test scenarios
* Add stress tests and edge scenarios
* Add test outside sync window
* Add edge tests
* Add last corner test
* Apply linters on files
---
tests/waku_store_sync/sync_utils.nim | 2 +
tests/waku_store_sync/test_protocol.nim | 444 ++++++++++++++++--
.../waku_store_sync/test_state_transition.nim | 249 ++++++++++
3 files changed, 660 insertions(+), 35 deletions(-)
create mode 100644 tests/waku_store_sync/test_state_transition.nim
diff --git a/tests/waku_store_sync/sync_utils.nim b/tests/waku_store_sync/sync_utils.nim
index e7fd82b57..d5cb601a2 100644
--- a/tests/waku_store_sync/sync_utils.nim
+++ b/tests/waku_store_sync/sync_utils.nim
@@ -26,6 +26,7 @@ proc newTestWakuRecon*(
wantsTx: AsyncQueue[PeerId],
needsTx: AsyncQueue[(PeerId, Fingerprint)],
cluster: uint16 = 1,
+ syncRange: timer.Duration = DefaultSyncRange,
shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7],
): Future[SyncReconciliation] {.async.} =
let peerManager = PeerManager.new(switch)
@@ -36,6 +37,7 @@ proc newTestWakuRecon*(
peerManager = peerManager,
wakuArchive = nil,
relayJitter = 0.seconds,
+ syncRange = syncRange,
idsRx = idsRx,
localWantsTx = wantsTx,
remoteNeedsTx = needsTx,
diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim
index efdd6a885..ecb50250b 100644
--- a/tests/waku_store_sync/test_protocol.nim
+++ b/tests/waku_store_sync/test_protocol.nim
@@ -1,8 +1,12 @@
{.used.}
import
- std/[options, sets, random, math], testutils/unittests, chronos, libp2p/crypto/crypto
-
+ std/[options, sets, random, math, algorithm],
+ testutils/unittests,
+ chronos,
+ libp2p/crypto/crypto
+import chronos, chronos/asyncsync
+import nimcrypto
import
../../waku/[
node/peer_manager,
@@ -21,6 +25,15 @@ import
../waku_archive/archive_utils,
./sync_utils
+proc collectDiffs*(
+ chan: var Channel[SyncID], diffCount: int
+): HashSet[WakuMessageHash] =
+ var received: HashSet[WakuMessageHash]
+ while received.len < diffCount:
+ let sid = chan.recv() # synchronous receive
+ received.incl sid.hash
+ result = received
+
suite "Waku Sync: reconciliation":
var serverSwitch {.threadvar.}: Switch
var clientSwitch {.threadvar.}: Switch
@@ -234,53 +247,377 @@ suite "Waku Sync: reconciliation":
remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == true
asyncTest "sync 2 nodes 10K msgs 1K diffs":
- let msgCount = 10_000
- var diffCount = 1_000
+ const
+ msgCount = 200_000 # total messages on the server
+ diffCount = 100 # messages initially missing on the client
- var diffMsgHashes: HashSet[WakuMessageHash]
- var randIndexes: HashSet[int]
+ ## ── choose which messages will be absent from the client ─────────────
+ var missingIdx: HashSet[int]
+ while missingIdx.len < diffCount:
+ missingIdx.incl rand(0 ..< msgCount)
- # Diffs
- for i in 0 ..< diffCount:
- var randInt = rand(0 ..< msgCount)
-
- #make sure we actually have the right number of diffs
- while randInt in randIndexes:
- randInt = rand(0 ..< msgCount)
-
- randIndexes.incl(randInt)
-
- # sync window is 1 hour, spread msg equally in that time
- let timeSlice = calculateTimeRange()
- let timeWindow = int64(timeSlice.b) - int64(timeSlice.a)
- let (part, _) = divmod(timeWindow, 100_000)
-
- var timestamp = timeSlice.a
+ ## ── generate messages and pre-load the two reconcilers ───────────────
+ let slice = calculateTimeRange() # 1-hour window
+ let step = (int64(slice.b) - int64(slice.a)) div msgCount
+ var ts = slice.a
for i in 0 ..< msgCount:
let
- msg = fakeWakuMessage(ts = timestamp, contentTopic = DefaultContentTopic)
- hash = computeMessageHash(DefaultPubsubTopic, msg)
+ msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ h = computeMessageHash(DefaultPubsubTopic, msg)
- server.messageIngress(hash, msg)
+ server.messageIngress(h, msg) # every msg is on the server
+ if i notin missingIdx:
+ client.messageIngress(h, msg) # all but 100 are on the client
+ ts += Timestamp(step)
- if i in randIndexes:
- diffMsgHashes.incl(hash)
+ ## ── sanity before we start the round ─────────────────────────────────
+ check remoteNeeds.len == 0
+
+ ## ── launch reconciliation from the client towards the server ─────────
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ ## ── verify that ≈100 diffs were queued (allow 10 % slack) ────────────
+ check remoteNeeds.len >= 90 # ≈ 100 × 0.9
+
+ asyncTest "sync 2 nodes 400K msgs 100k diffs":
+ const
+ msgCount = 400_000
+ diffCount = 100_000
+ tol = 1000
+
+ var diffMsgHashes: HashSet[WakuMessageHash]
+ var missingIdx: HashSet[int]
+ while missingIdx.len < diffCount:
+ missingIdx.incl rand(0 ..< msgCount)
+
+ let slice = calculateTimeRange()
+ let step = (int64(slice.b) - int64(slice.a)) div msgCount
+ var ts = slice.a
+
+ for i in 0 ..< msgCount:
+ let
+ msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ h = computeMessageHash(DefaultPubsubTopic, msg)
+
+ server.messageIngress(h, msg)
+ if i notin missingIdx:
+ client.messageIngress(h, msg)
else:
- client.messageIngress(hash, msg)
+ diffMsgHashes.incl h
- timestamp += Timestamp(part)
- continue
+ ts += Timestamp(step)
- check:
- remoteNeeds.len == 0
+ check remoteNeeds.len == 0
let res = await client.storeSynchronization(some(serverPeerInfo))
assert res.isOk(), $res.error
- # timimg issue make it hard to match exact numbers
- check:
- remoteNeeds.len > 900
+ check remoteNeeds.len >= diffCount - tol and remoteNeeds.len < diffCount
+ let (_, deliveredHash) = await remoteNeeds.get()
+ check deliveredHash in diffMsgHashes
+
+ asyncTest "sync 2 nodes 100 msgs 20 diff – 1-second window":
+ const
+ msgCount = 100
+ diffCount = 20
+
+ var missingIdx: seq[int] = @[]
+ while missingIdx.len < diffCount:
+ let n = rand(0 ..< msgCount)
+ if n notin missingIdx:
+ missingIdx.add n
+
+ var diffMsgHashes: HashSet[WakuMessageHash]
+
+ let sliceEnd = now()
+ let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64
+ let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount
+ var ts = sliceStart
+
+ for i in 0 ..< msgCount:
+ let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ let hash = computeMessageHash(DefaultPubsubTopic, msg)
+ server.messageIngress(hash, msg)
+
+ if i in missingIdx:
+ diffMsgHashes.incl hash
+ else:
+ client.messageIngress(hash, msg)
+
+ ts += Timestamp(step)
+
+ check remoteNeeds.len == 0
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == diffCount
+
+ for _ in 0 ..< diffCount:
+ let (_, deliveredHash) = await remoteNeeds.get()
+ check deliveredHash in diffMsgHashes
+
+ asyncTest "sync 2 nodes 500k msgs 300k diff – stress window":
+ const
+ msgCount = 500_000
+ diffCount = 300_000
+
+ randomize()
+ var allIdx = newSeq[int](msgCount)
+ for i in 0 ..< msgCount:
+ allIdx[i] = i
+ shuffle(allIdx)
+
+ let missingIdx = allIdx[0 ..< diffCount]
+ var missingSet: HashSet[int]
+ for idx in missingIdx:
+ missingSet.incl idx
+
+ var diffMsgHashes: HashSet[WakuMessageHash]
+
+ let sliceEnd = now()
+ let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64
+ let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount
+ var ts = sliceStart
+
+ for i in 0 ..< msgCount:
+ let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ let hash = computeMessageHash(DefaultPubsubTopic, msg)
+ server.messageIngress(hash, msg)
+
+ if i in missingSet:
+ diffMsgHashes.incl hash
+ else:
+ client.messageIngress(hash, msg)
+
+ ts += Timestamp(step)
+
+ check remoteNeeds.len == 0
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == diffCount
+
+ for _ in 0 ..< 1000:
+ let (_, deliveredHash) = await remoteNeeds.get()
+ check deliveredHash in diffMsgHashes
+
+ asyncTest "sync 2 nodes, 40 msgs: 20 in-window diff, 20 out-window ignored":
+ const
+ diffInWin = 20
+ diffOutWin = 20
+ stepOutNs = 100_000_000'u64
+ outOffsetNs = 2_000_000_000'u64 # the 20 out-of-window messages are sent 2 seconds earlier
+
+ randomize()
+
+ let nowNs = getNowInNanosecondTime()
+ let sliceStart = Timestamp(uint64(nowNs) - 700_000_000'u64)
+ let sliceEnd = nowNs
+ let stepIn = (sliceEnd.int64 - sliceStart.int64) div diffInWin
+
+ let oldStart = Timestamp(uint64(sliceStart) - outOffsetNs)
+ let stepOut = Timestamp(stepOutNs)
+
+ var inWinHashes, outWinHashes: HashSet[WakuMessageHash]
+
+ var ts = sliceStart
+ for _ in 0 ..< diffInWin:
+ let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic)
+ let hash = computeMessageHash(DefaultPubsubTopic, msg)
+ server.messageIngress(hash, msg)
+ inWinHashes.incl hash
+ ts += Timestamp(stepIn)
+
+ ts = oldStart
+ for _ in 0 ..< diffOutWin:
+ let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic)
+ let hash = computeMessageHash(DefaultPubsubTopic, msg)
+ server.messageIngress(hash, msg)
+ outWinHashes.incl hash
+ ts += Timestamp(stepOut)
+
+ check remoteNeeds.len == 0
+
+ let oneSec = timer.seconds(1)
+
+ server = await newTestWakuRecon(
+ serverSwitch, idsChannel, localWants, remoteNeeds, syncRange = oneSec
+ )
+
+ client = await newTestWakuRecon(
+ clientSwitch, idsChannel, localWants, remoteNeeds, syncRange = oneSec
+ )
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == diffInWin
+
+ for _ in 0 ..< diffInWin:
+ let (_, deliveredHashes) = await remoteNeeds.get()
+ check deliveredHashes in inWinHashes
+ check deliveredHashes notin outWinHashes
+
+ asyncTest "hash-fingerprint collision, same timestamp – stable sort":
+ let ts = Timestamp(getNowInNanosecondTime())
+
+ var msg1 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ var msg2 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ msg2.payload[0] = msg2.payload[0] xor 0x01
+ var h1 = computeMessageHash(DefaultPubsubTopic, msg1)
+ var h2 = computeMessageHash(DefaultPubsubTopic, msg2)
+
+ for i in 0 ..< 8:
+ h2[i] = h1[i]
+ for i in 0 ..< 8:
+ check h1[i] == h2[i]
+
+ check h1 != h2
+
+ server.messageIngress(h1, msg1)
+ client.messageIngress(h2, msg2)
+
+ check remoteNeeds.len == 0
+ server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds)
+
+ client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds)
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == 1
+
+ var vec = @[SyncID(time: ts, hash: h2), SyncID(time: ts, hash: h1)]
+ vec.shuffle()
+ vec.sort()
+
+ let hFirst = vec[0].hash
+ let hSecond = vec[1].hash
+ check hFirst != hSecond
+ check vec[0].time == ts and vec[1].time == ts
+
+ asyncTest "malformed message-ID is ignored during reconciliation":
+ let nowTs = Timestamp(getNowInNanosecondTime())
+
+ let goodMsg = fakeWakuMessage(ts = nowTs, contentTopic = DefaultContentTopic)
+ var goodHash = computeMessageHash(DefaultPubsubTopic, goodMsg)
+
+ var badHash: WakuMessageHash
+ for i in 0 ..< 32:
+ badHash[i] = 0'u8
+ let badMsg = fakeWakuMessage(ts = Timestamp(0), contentTopic = DefaultContentTopic)
+
+ server.messageIngress(goodHash, goodMsg)
+ server.messageIngress(badHash, badMsg)
+
+ check remoteNeeds.len == 0
+
+ server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds)
+ client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds)
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == 1
+ let (_, neededHash) = await remoteNeeds.get()
+ check neededHash == goodHash
+ check neededHash != badHash
+
+ asyncTest "malformed ID: future-timestamp msg is ignored":
+ let nowNs = getNowInNanosecondTime()
+ let tsNow = Timestamp(nowNs)
+
+ let goodMsg = fakeWakuMessage(ts = tsNow, contentTopic = DefaultContentTopic)
+ let goodHash = computeMessageHash(DefaultPubsubTopic, goodMsg)
+
+ const tenYearsSec = 10 * 365 * 24 * 60 * 60
+ let futureNs = nowNs + int64(tenYearsSec) * 1_000_000_000'i64
+ let badTs = Timestamp(futureNs.uint64)
+
+ let badMsg = fakeWakuMessage(ts = badTs, contentTopic = DefaultContentTopic)
+ let badHash = computeMessageHash(DefaultPubsubTopic, badMsg)
+
+ server.messageIngress(goodHash, goodMsg)
+ server.messageIngress(badHash, badMsg)
+
+ check remoteNeeds.len == 0
+ server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds)
+ client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds)
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == 1
+ let (_, neededHash) = await remoteNeeds.get()
+ check neededHash == goodHash
+ check neededHash != badHash
+
+ asyncTest "duplicate ID is queued only once":
+ let ts = Timestamp(getNowInNanosecondTime())
+ let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ let h = computeMessageHash(DefaultPubsubTopic, msg)
+
+ server.messageIngress(h, msg)
+ server.messageIngress(h, msg)
+ check remoteNeeds.len == 0
+
+ server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds)
+ client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds)
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check remoteNeeds.len == 1
+ let (_, neededHash) = await remoteNeeds.get()
+ check neededHash == h
+
+ asyncTest "sync terminates immediately when no diffs exist":
+ let ts = Timestamp(getNowInNanosecondTime())
+ let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ let hash = computeMessageHash(DefaultPubsubTopic, msg)
+
+ server.messageIngress(hash, msg)
+ client.messageIngress(hash, msg)
+
+ let idsQ = newAsyncQueue[SyncID]()
+ let wantsQ = newAsyncQueue[PeerId]()
+ let needsQ = newAsyncQueue[(PeerId, Fingerprint)]()
+
+ server = await newTestWakuRecon(serverSwitch, idsQ, wantsQ, needsQ)
+ client = await newTestWakuRecon(clientSwitch, idsQ, wantsQ, needsQ)
+
+ defer:
+ server.stop()
+ client.stop()
+
+ let res = await client.storeSynchronization(some(serverPeerInfo))
+ assert res.isOk(), $res.error
+
+ check needsQ.len == 0
suite "Waku Sync: transfer":
var
@@ -396,3 +733,40 @@ suite "Waku Sync: transfer":
check:
response.messages.len > 0
+
+ asyncTest "Check the exact missing messages are received":
+ let timeSlice = calculateTimeRange()
+ let timeWindow = int64(timeSlice.b) - int64(timeSlice.a)
+ let (part, _) = divmod(timeWindow, 3)
+
+ var ts = timeSlice.a
+
+ let msgA = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ ts += Timestamp(part)
+ let msgB = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+ ts += Timestamp(part)
+ let msgC = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
+
+ let hA = computeMessageHash(DefaultPubsubTopic, msgA)
+ let hB = computeMessageHash(DefaultPubsubTopic, msgB)
+ let hC = computeMessageHash(DefaultPubsubTopic, msgC)
+
+ discard serverDriver.put(DefaultPubsubTopic, @[msgA, msgB, msgC])
+ discard clientDriver.put(DefaultPubsubTopic, @[msgA])
+
+ await serverRemoteNeeds.put((clientPeerInfo.peerId, hB))
+ await serverRemoteNeeds.put((clientPeerInfo.peerId, hC))
+ await clientLocalWants.put(serverPeerInfo.peerId)
+
+ await sleepAsync(1.seconds)
+ check serverRemoteNeeds.len == 0
+
+ let sid1 = await clientIds.get()
+ let sid2 = await clientIds.get()
+
+ let received = [sid1.hash, sid2.hash].toHashSet()
+ let expected = [hB, hC].toHashSet
+
+ check received == expected
+
+ check clientIds.len == 0
diff --git a/tests/waku_store_sync/test_state_transition.nim b/tests/waku_store_sync/test_state_transition.nim
new file mode 100644
index 000000000..08ec9a91e
--- /dev/null
+++ b/tests/waku_store_sync/test_state_transition.nim
@@ -0,0 +1,249 @@
+import unittest, nimcrypto, std/[sequtils, strformat]
+import ../../waku/waku_store_sync/[reconciliation, common]
+import ../../waku/waku_store_sync/storage/seq_storage
+import ../../waku/waku_core/message/digest
+
+proc toDigest*(s: string): WakuMessageHash =
+ let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, s.high))
+ for i in 0 .. 31:
+ result[i] = d.data[i]
+
+proc `..`(a, b: SyncID): Slice[SyncID] =
+ Slice[SyncID](a: a, b: b)
+
+suite "Waku Sync – reconciliation":
+ test "Fingerprint → ItemSet → zero (default thresholds)":
+ const N = 2_000
+ const idx = 137
+
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var baseH, altH: WakuMessageHash
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let h = toDigest("msg" & $i)
+ discard local.insert(SyncID(time: ts, hash: h))
+ var hr = h
+ if i == idx:
+ baseH = h
+ altH = toDigest("msg" & $i & "x")
+ hr = altH
+ discard remote.insert(SyncID(time: ts, hash: hr))
+
+ var z: WakuMessageHash
+ let whole = SyncID(time: 1000, hash: z) .. SyncID(time: 1000 + N - 1, hash: z)
+
+ var s1, r1: seq[WakuMessageHash]
+ let p1 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(whole, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(whole)],
+ itemSets: @[],
+ )
+ let rep1 = local.processPayload(p1, s1, r1)
+ check rep1.ranges.len == 8
+ check rep1.ranges.allIt(it[1] == RangeType.Fingerprint)
+
+ let mismT = 1000 + idx
+ let sub =
+ rep1.ranges.filterIt(mismT >= it[0].a.time and mismT <= it[0].b.time)[0][0]
+
+ var s2, r2: seq[WakuMessageHash]
+ let p2 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(sub, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(sub)],
+ itemSets: @[],
+ )
+ let rep2 = local.processPayload(p2, s2, r2)
+ check rep2.ranges.len == 8
+ check rep2.ranges.allIt(it[1] == RangeType.ItemSet)
+
+ var s3, r3: seq[WakuMessageHash]
+ discard remote.processPayload(rep2, s3, r3)
+ check s3.len == 1 and s3[0] == altH
+ check r3.len == 1 and r3[0] == baseH
+
+ discard local.insert(SyncID(time: mismT, hash: altH))
+ discard remote.insert(SyncID(time: mismT, hash: baseH))
+
+ var s4, r4: seq[WakuMessageHash]
+ let p3 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(sub, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(sub)],
+ itemSets: @[],
+ )
+ let rep3 = local.processPayload(p3, s4, r4)
+ check rep3.ranges.len == 0
+ check s4.len == 0 and r4.len == 0
+
+ test "test 2 ranges includes 1 skip":
+ const N = 120
+ const pivot = 60
+
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var diffHash: WakuMessageHash
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let h = toDigest("msg" & $i)
+ discard local.insert(SyncID(time: ts, hash: h))
+ var hr: WakuMessageHash
+ if i >= pivot:
+ diffHash = toDigest("msg" & $i & "_x")
+ hr = diffHash
+ else:
+ hr = h
+
+ discard remote.insert(SyncID(time: ts, hash: hr))
+
+ var z: WakuMessageHash
+ let sliceA = SyncID(time: 1000, hash: z) .. SyncID(time: 1059, hash: z)
+ let sliceB = SyncID(time: 1060, hash: z) .. SyncID(time: 1119, hash: z)
+
+ var s, r: seq[WakuMessageHash]
+ let payload = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(sliceA, RangeType.Fingerprint), (sliceB, RangeType.Fingerprint)],
+ fingerprints:
+ @[remote.computeFingerprint(sliceA), remote.computeFingerprint(sliceB)],
+ itemSets: @[],
+ )
+ let reply = local.processPayload(payload, s, r)
+
+ check reply.ranges.len == 2
+ check reply.ranges[0][1] == RangeType.Skip
+ check reply.ranges[1][1] == RangeType.ItemSet
+ check reply.itemSets.len == 1
+ check not reply.itemSets[0].elements.anyIt(it.hash == diffHash)
+
+ test "custom threshold (50) → eight ItemSets first round":
+ const N = 300
+ const idx = 123
+
+ let local = SeqStorage.new(capacity = N, threshold = 50, partitions = 8)
+ let remote = SeqStorage.new(capacity = N, threshold = 50, partitions = 8)
+
+ var baseH, altH: WakuMessageHash
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let h = toDigest("msg" & $i)
+ discard local.insert(SyncID(time: ts, hash: h))
+ var hr = h
+ if i == idx:
+ baseH = h
+ altH = toDigest("msg" & $i & "_x")
+ hr = altH
+ discard remote.insert(SyncID(time: ts, hash: hr))
+
+ var z: WakuMessageHash
+ let slice = SyncID(time: 1000, hash: z) .. SyncID(time: 1000 + N - 1, hash: z)
+
+ var toS, toR: seq[WakuMessageHash]
+ let p = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(slice, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(slice)],
+ itemSets: @[],
+ )
+ let reply = local.processPayload(p, toS, toR)
+
+ check reply.ranges.len == 8
+ check reply.ranges.allIt(it[1] == RangeType.ItemSet)
+ check reply.itemSets.len == 8
+
+ let mismT = 1000 + idx
+ var hit = 0
+ for ist in reply.itemSets:
+ if ist.elements.anyIt(it.time == mismT and it.hash == baseH):
+ inc hit
+ check hit == 1
+
+ test "test N=80K,3FP,2IS,SKIP":
+ const N = 80_000
+ const bad = N - 10
+
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var baseH, altH: WakuMessageHash
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let h = toDigest("msg" & $i)
+ discard local.insert(SyncID(time: ts, hash: h))
+
+ let hr =
+ if i == bad:
+ baseH = h
+ altH = toDigest("msg" & $i & "_x")
+ altH
+ else:
+ h
+ discard remote.insert(SyncID(time: ts, hash: hr))
+
+ var slice =
+ SyncID(time: 1000, hash: EmptyFingerprint) ..
+ SyncID(time: 1000 + N - 1, hash: FullFingerprint)
+
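+ # Helper: run one reconciliation round in which remote advertises its fingerprint for `s` and local replies.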
+ proc fpReply(s: Slice[SyncID], sendQ, recvQ: var seq[WakuMessageHash]): RangesData =
+ local.processPayload(
+ RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(s, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(s)],
+ itemSets: @[],
+ ),
+ sendQ,
+ recvQ,
+ )
+
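+ # Rounds 1-3 keep splitting into eight fingerprint sub-ranges; by round 4 the range is small enough for item sets.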
+ var tmpS, tmpR: seq[WakuMessageHash]
+
+ for r in 1 .. 3:
+ let rep = fpReply(slice, tmpS, tmpR)
+ echo "R{r} len={rep.ranges.len} expecting 8 FP"
+ check rep.ranges.len == 8
+ check rep.ranges.allIt(it[1] == RangeType.Fingerprint)
+ for (sl, _) in rep.ranges:
+ if local.computeFingerprint(sl) != remote.computeFingerprint(sl):
+ slice = sl
+ break
+
+ let rep4 = fpReply(slice, tmpS, tmpR)
+ echo "R4 len={rep4.ranges.len} expecting 8 IS"
+ check rep4.ranges.len == 8
+ check rep4.ranges.allIt(it[1] == RangeType.ItemSet)
+ for (sl, _) in rep4.ranges:
+ if sl.a.time <= 1000 + bad and sl.b.time >= 1000 + bad:
+ slice = sl
+ break
+
+ var send5, recv5: seq[WakuMessageHash]
+ let rep5 = fpReply(slice, send5, recv5)
+ echo "R5 len={rep5.ranges.len} expecting 1 IS"
+ check rep5.ranges.len == 1
+ check rep5.ranges[0][1] == RangeType.ItemSet
+
+ var qSend, qRecv: seq[WakuMessageHash]
+ discard remote.processPayload(rep5, qSend, qRecv)
+ echo "queue send={qSend.len} recv={qRecv.len}"
+ check qSend.len == 1 and qSend[0] == altH
+ check qRecv.len == 1 and qRecv[0] == baseH
+
+ discard local.insert(SyncID(time: slice.a.time, hash: altH))
+ discard remote.insert(SyncID(time: slice.a.time, hash: baseH))
+
+ var send6, recv6: seq[WakuMessageHash]
+ let rep6 = fpReply(slice, send6, recv6)
+ echo "R6 len={rep6.ranges.len} expecting 0"
+ check rep6.ranges.len == 0
+ check send6.len == 0 and recv6.len == 0
From b1dc83ec03676893db2675a1fe86b52af8bcb249 Mon Sep 17 00:00:00 2001
From: AYAHASSAN287 <49167455+AYAHASSAN287@users.noreply.github.com>
Date: Wed, 18 Jun 2025 12:44:46 +0300
Subject: [PATCH 25/47] test: Add comprehensive reconciliation unit-tests for
Waku Store Sync (#3388)
* Revert "Revert "Add finger print tests""
This reverts commit 36066311f91da31ca69fef3fa327d5e7fda7e50c.
* Adding waku sync tests
* Adding test "reconciliation produces subranges when fingerprints differ"
* Modifying the range split test
* Add more checks to range split tests
* Adding more range split tests
* Make the test file ready for review
* delete fingerprint file
* Fix review points
* Update tests/waku_store_sync/test_range_split.nim
Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
* revert change in noise utils file
* Apply linters
* revert to master
* Fix linters
* Update tests/waku_store_sync/test_range_split.nim
Co-authored-by: Simon-Pierre Vivier
---------
Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Co-authored-by: Simon-Pierre Vivier
---
tests/waku_store_sync/test_range_split.nim | 242 +++++++++++++++++++++
1 file changed, 242 insertions(+)
create mode 100644 tests/waku_store_sync/test_range_split.nim
diff --git a/tests/waku_store_sync/test_range_split.nim b/tests/waku_store_sync/test_range_split.nim
new file mode 100644
index 000000000..cc09bdb9d
--- /dev/null
+++ b/tests/waku_store_sync/test_range_split.nim
@@ -0,0 +1,242 @@
+import unittest, nimcrypto, std/sequtils, results
+import ../../waku/waku_store_sync/[reconciliation, common]
+import ../../waku/waku_store_sync/storage/seq_storage
+import ../../waku/waku_core/message/digest
+
+proc toDigest(s: string): WakuMessageHash =
+ let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, (s.len - 1)))
+ var res: WakuMessageHash
+ for i in 0 .. 31:
+ res[i] = d.data[i]
+ return res
+
+proc `..`(a, b: SyncID): Slice[SyncID] =
+ Slice[SyncID](a: a, b: b)
+
+suite "Waku Sync – reconciliation":
+ test "fan-out: eight fingerprint sub-ranges for large slice":
+ const N = 2_048
+ const mismatchI = 70
+
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var baseHashMismatch: WakuMessageHash
+ var remoteHashMismatch: WakuMessageHash
+
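+ # Fill both stores with identical hashes, except at index `mismatchI` where remote stores a different digest.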
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let hashLocal = toDigest("msg" & $i)
+ local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
+ var hashRemote = hashLocal
+ if i == mismatchI:
+ baseHashMismatch = hashLocal
+ remoteHashMismatch = toDigest("msg" & $i & "_x")
+ hashRemote = remoteHashMismatch
+ remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
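+ # The whole-slice fingerprints now differ, so local should fan the range out into 8 fingerprint sub-ranges.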
+ var z: WakuMessageHash
+ let whole = SyncID(time: 1000, hash: z) .. SyncID(time: 1000 + N - 1, hash: z)
+
+ check local.computeFingerprint(whole) != remote.computeFingerprint(whole)
+
+ let remoteFp = remote.computeFingerprint(whole)
+ let payload = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(whole, RangeType.Fingerprint)],
+ fingerprints: @[remoteFp],
+ itemSets: @[],
+ )
+
+ var toSend, toRecv: seq[WakuMessageHash]
+ let reply = local.processPayload(payload, toSend, toRecv)
+
+ check reply.ranges.len == 8
+ check reply.ranges.allIt(it[1] == RangeType.Fingerprint)
+ check reply.itemSets.len == 0
+ check reply.fingerprints.len == 8
+
+ let mismTime = 1000 + mismatchI
+ var covered = false
+ for (slc, _) in reply.ranges:
+ if mismTime >= slc.a.time and mismTime <= slc.b.time:
+ covered = true
+ break
+ check covered
+
+ check toSend.len == 0
+ check toRecv.len == 0
+
+ test "splits mismatched fingerprint into two sub-ranges then item-set":
+ const threshold = 4
+ const partitions = 2
+
+ let local = SeqStorage.new(@[], threshold = threshold, partitions = partitions)
+ let remote = SeqStorage.new(@[], threshold = threshold, partitions = partitions)
+
+ var mismatchHash: WakuMessageHash
+ for i in 0 ..< 8:
+ let t = 1000 + i
+ let baseHash = toDigest("msg" & $i)
+
+ var localHash = baseHash
+ var remoteHash = baseHash
+
+ if i == 3:
+ mismatchHash = toDigest("msg" & $i & "_x")
+ localHash = mismatchHash
+
+ discard local.insert(SyncID(time: t, hash: localHash))
+ discard remote.insert(SyncID(time: t, hash: remoteHash))
+
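+ # With threshold 4 and 2 partitions, the 8-element mismatching range splits into two sub-ranges,
+ # each small enough to be returned directly as an item set.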
+ var zeroHash: WakuMessageHash
+ let wholeRange =
+ SyncID(time: 1000, hash: zeroHash) .. SyncID(time: 1007, hash: zeroHash)
+
+ var toSend, toRecv: seq[WakuMessageHash]
+
+ let payload = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(wholeRange, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(wholeRange)],
+ itemSets: @[],
+ )
+
+ let reply = local.processPayload(payload, toSend, toRecv)
+
+ check reply.ranges.len == partitions
+ check reply.itemSets.len == partitions
+
+ check reply.itemSets.anyIt(
+ it.elements.anyIt(it.hash == mismatchHash and it.time == 1003)
+ )
+
+ test "second round when N =2048 & local ":
+ const N = 2_048
+ const mismatchI = 70
+
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var baseHashMismatch, remoteHashMismatch: WakuMessageHash
+
+ for i in 0 ..< N:
+ let ts = 1000 + i
+ let hashLocal = toDigest("msg" & $i)
+ local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
+ var hashRemote = hashLocal
+ if i == mismatchI:
+ baseHashMismatch = hashLocal
+ remoteHashMismatch = toDigest("msg" & $i & "_x")
+ hashRemote = remoteHashMismatch
+ remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
+ var zero: WakuMessageHash
+ let sliceWhole =
+ SyncID(time: 1000, hash: zero) .. SyncID(time: 1000 + N - 1, hash: zero)
+ check local.computeFingerprint(sliceWhole) != remote.computeFingerprint(sliceWhole)
+
+ let payload1 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(sliceWhole, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(sliceWhole)],
+ itemSets: @[],
+ )
+
+ var toSend, toRecv: seq[WakuMessageHash]
+ let reply1 = local.processPayload(payload1, toSend, toRecv)
+
+ check reply1.ranges.len == 8
+ check reply1.ranges.allIt(it[1] == RangeType.Fingerprint)
+
+ let mismTime = 1000 + mismatchI
+ var subSlice: Slice[SyncID]
+ for (sl, _) in reply1.ranges:
+ if mismTime >= sl.a.time and mismTime <= sl.b.time:
+ subSlice = sl
+ break
+ check subSlice.a.time != 0
+
+ let payload2 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(subSlice, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(subSlice)],
+ itemSets: @[],
+ )
+
+ var toSend2, toRecv2: seq[WakuMessageHash]
+ let reply2 = local.processPayload(payload2, toSend2, toRecv2)
+
+ check reply2.ranges.len == 8
+ check reply2.ranges.allIt(it[1] == RangeType.ItemSet)
+ check reply2.itemSets.len == 8
+
+ var matchCount = 0
+ for iset in reply2.itemSets:
+ if iset.elements.anyIt(it.time == mismTime and it.hash == baseHashMismatch):
+ inc matchCount
+ check not iset.elements.anyIt(it.hash == remoteHashMismatch)
+ check matchCount == 1
+
+ check toSend2.len == 0
+ check toRecv2.len == 0
+
+ test "second-round payload remote":
+ let local = SeqStorage.new(@[])
+ let remote = SeqStorage.new(@[])
+
+ var baseHash: WakuMessageHash
+ var alteredHash: WakuMessageHash
+
+ for i in 0 ..< 8:
+ let ts = 1000 + i
+ let hashLocal = toDigest("msg" & $i)
+ local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
+ var hashRemote = hashLocal
+ if i == 3:
+ baseHash = hashLocal
+ alteredHash = toDigest("msg" & $i & "_x")
+ hashRemote = alteredHash
+
+ remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr:
+ assert false, "failed to insert hash: " & $error
+
+ var zero: WakuMessageHash
+ let slice = SyncID(time: 1000, hash: zero) .. SyncID(time: 1007, hash: zero)
+
+ check local.computeFingerprint(slice) != remote.computeFingerprint(slice)
+
+ var toSend1, toRecv1: seq[WakuMessageHash]
+ let pay1 = RangesData(
+ cluster: 0,
+ shards: @[0],
+ ranges: @[(slice, RangeType.Fingerprint)],
+ fingerprints: @[remote.computeFingerprint(slice)],
+ itemSets: @[],
+ )
+ let rep1 = local.processPayload(pay1, toSend1, toRecv1)
+
+ check rep1.ranges.len == 1
+ check rep1.ranges[0][1] == RangeType.ItemSet
+ check toSend1.len == 0
+ check toRecv1.len == 0
+
+ var toSend2, toRecv2: seq[WakuMessageHash]
+ discard remote.processPayload(rep1, toSend2, toRecv2)
+
+ check toSend2.len == 1
+ check toSend2[0] == alteredHash
+ check toRecv2.len == 1
+ check toRecv2[0] == baseHash
From 49b12e6cf3b4f2e5066f132824cb3b94816ce5e0 Mon Sep 17 00:00:00 2001
From: Simon-Pierre Vivier
Date: Wed, 18 Jun 2025 15:53:13 -0400
Subject: [PATCH 26/47] remove echo from tests (#3459)
---
tests/waku_store_sync/test_protocol.nim | 1 -
tests/waku_store_sync/test_state_transition.nim | 5 -----
2 files changed, 6 deletions(-)
diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim
index ecb50250b..d0f6b81ee 100644
--- a/tests/waku_store_sync/test_protocol.nim
+++ b/tests/waku_store_sync/test_protocol.nim
@@ -472,7 +472,6 @@ suite "Waku Sync: reconciliation":
var msg1 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
var msg2 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic)
msg2.payload[0] = msg2.payload[0] xor 0x01
- echo msg2
var h1 = computeMessageHash(DefaultPubsubTopic, msg1)
var h2 = computeMessageHash(DefaultPubsubTopic, msg2)
diff --git a/tests/waku_store_sync/test_state_transition.nim b/tests/waku_store_sync/test_state_transition.nim
index 08ec9a91e..732a577a9 100644
--- a/tests/waku_store_sync/test_state_transition.nim
+++ b/tests/waku_store_sync/test_state_transition.nim
@@ -210,7 +210,6 @@ suite "Waku Sync – reconciliation":
for r in 1 .. 3:
let rep = fpReply(slice, tmpS, tmpR)
- echo "R{r} len={rep.ranges.len} expecting 8 FP"
check rep.ranges.len == 8
check rep.ranges.allIt(it[1] == RangeType.Fingerprint)
for (sl, _) in rep.ranges:
@@ -219,7 +218,6 @@ suite "Waku Sync – reconciliation":
break
let rep4 = fpReply(slice, tmpS, tmpR)
- echo "R4 len={rep4.ranges.len} expecting 8 IS"
check rep4.ranges.len == 8
check rep4.ranges.allIt(it[1] == RangeType.ItemSet)
for (sl, _) in rep4.ranges:
@@ -229,13 +227,11 @@ suite "Waku Sync – reconciliation":
var send5, recv5: seq[WakuMessageHash]
let rep5 = fpReply(slice, send5, recv5)
- echo "R5 len={rep5.ranges.len} expecting 1 IS"
check rep5.ranges.len == 1
check rep5.ranges[0][1] == RangeType.ItemSet
var qSend, qRecv: seq[WakuMessageHash]
discard remote.processPayload(rep5, qSend, qRecv)
- echo "queue send={qSend.len} recv={qRecv.len}"
check qSend.len == 1 and qSend[0] == altH
check qRecv.len == 1 and qRecv[0] == baseH
@@ -244,6 +240,5 @@ suite "Waku Sync – reconciliation":
var send6, recv6: seq[WakuMessageHash]
let rep6 = fpReply(slice, send6, recv6)
- echo "R6 len={rep6.ranges.len} expecting 0"
check rep6.ranges.len == 0
check send6.len == 0 and recv6.len == 0
From 921d1d81afa850bff7529d1c582e3454a69b2d1b Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Wed, 18 Jun 2025 22:55:18 +0200
Subject: [PATCH 27/47] dnsName servers is not properly set to waku node
(#3457)
---
waku/factory/conf_builder/dns_discovery_conf_builder.nim | 3 +++
waku/factory/external_config.nim | 1 +
2 files changed, 4 insertions(+)
diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
index dbb2c5fd3..34337c9b1 100644
--- a/waku/factory/conf_builder/dns_discovery_conf_builder.nim
+++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
@@ -21,6 +21,9 @@ proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
b.enrTreeUrl = some(enrTreeUrl)
+proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) =
+ b.nameServers = nameServers
+
proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
if not b.enabled.get(false):
return ok(none(DnsDiscoveryConf))
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index a9e828893..190ce46e7 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -1005,6 +1005,7 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
+ b.dnsDiscoveryConf.withNameServers(n.dnsAddrsNameServers)
if n.discv5Discovery.isSome():
b.discv5Conf.withEnabled(n.discv5Discovery.get())
From fd5780eae7b252fba3c7fa6ff9324f4feebcf9e2 Mon Sep 17 00:00:00 2001
From: Simon-Pierre Vivier
Date: Thu, 19 Jun 2025 11:35:32 -0400
Subject: [PATCH 28/47] chore: lower waku sync log lvl (#3461)
---
waku/waku_store_sync/reconciliation.nim | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim
index 01000935b..0601d2c23 100644
--- a/waku/waku_store_sync/reconciliation.nim
+++ b/waku/waku_store_sync/reconciliation.nim
@@ -122,7 +122,7 @@ proc processRequest(
roundTrips.inc()
- debug "sync payload received",
+ trace "sync payload received",
local = self.peerManager.switch.peerInfo.peerId,
remote = conn.peerId,
payload = recvPayload
@@ -141,7 +141,7 @@ proc processRequest(
recvPayload.shards.toPackedSet() == self.shards:
sendPayload = self.storage.processPayload(recvPayload, hashToSend, hashToRecv)
- debug "sync payload processed",
+ trace "sync payload processed",
hash_to_send = hashToSend, hash_to_recv = hashToRecv
sendPayload.cluster = self.cluster
@@ -166,7 +166,7 @@ proc processRequest(
return
err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg)
- debug "sync payload sent",
+ trace "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
remote = conn.peerId,
payload = sendPayload
@@ -217,7 +217,7 @@ proc initiate(
"remote " & $connection.peerId & " connection write error: " & writeRes.error.msg
)
- debug "sync payload sent",
+ trace "sync payload sent",
local = self.peerManager.switch.peerInfo.peerId,
remote = connection.peerId,
payload = initPayload
From d3cf24f7a201b466895151f8789e3c245c9c1517 Mon Sep 17 00:00:00 2001
From: Tanya S <120410716+stubbsta@users.noreply.github.com>
Date: Fri, 20 Jun 2025 11:46:08 +0200
Subject: [PATCH 29/47] feat: Update implementation for new contract abi
(#3390)
* update RLN contract abi functions and procs
* Clean up debugging lines
* Use more descriptive object field names for MembershipInfo
* Fix formatting
* fix group_manager after rebase to use new contract method sig
* Fix linting for group_manager.nim
* Test idcommitment to BE and debug logs
* Improve IdCommitment logging
* Update all keystore credentials to use BE format
* Add workaround for groupmanager web3 eth_call
* Add await to sendEthCallWithChainID
* Add error handling for failed eth_call
* Improve error handling for eth_call workaround
* Revert keystore credentials back to using LE
* Update toRateCommitment proc to use LE instead of BE
* Add IdCommitment to calldata as BE
* feat: Update rln contract deployment and tests (#3408)
* update RLN contract abi functions and procs
* update waku-rlnv2-contract submodule commit to latest
* Add RlnV2 contract deployment using forge scripts
* Clean up output of forge script command, debug logs to trace, warn to error
* Move TestToken deployment to own proc
* first implementation of token minting and approval (see the sketch after this list)
* Update rln tests with usermessagelimit new minimum
* Clean up code and error handling
* Rework RLN tests WIP
* Fix RLN test for new contract
* RLN Tests updated
* Fix formatting
* Improve error logs
* Fix error message formatting
* Fix linting
* Add pnpm dependency installation for rln tests
* Update test dependencies in makefile
* Minor updates, error messages etc
* Code cleanup and change some debug logging to trace
* Improve handling of Result return value
* Use absolute path for waku-rlnv2-contract
* Simplify token approval and balance check
* Remove unused Anvil options
* Add additional checks for stopAnvil process
* Fix anvil process call to null
* Add lock to tests for rln_group_manager_onchain
* Debug for forge command
* Verify paths
* Install pnpm as global
* Cleanup anvil running procs
* Add check before installing anvil
* Clean up onchain group_manager
* Add proc to setup environment for contract deployer
* Refactoring and improved error handling
* Fix anvil install directory string
* Fix linting in test_range_split
* Add const for the contract address length
* Add separate checks for why Approval transaction fails
* Update RLN contract address and chainID for TWN
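A minimal sketch of the setup flow these helpers enable (illustrative only: `account`, `rlnProxyAddress` and the token amount are placeholders; the actual helpers live in tests/waku_rln_relay/utils_onchain.nim):

  let tokenAddress = (await deployTestToken(privateKey, account, web3)).valueOr:
    raiseAssert "TestToken deployment failed: " & $error
  # mint test tokens to the account that will register a membership
  discard await sendMintCall(web3, account, tokenAddress, account, ethToWei(100.u256))
  # allow the RLN proxy contract to pull the registration fee from that account
  discard (
    await approveTokenAllowanceAndVerify(
      web3, account, privateKey, tokenAddress, rlnProxyAddress, ethToWei(100.u256)
    )
  ).valueOr:
    raiseAssert "token approval failed: " & $error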
---
Makefile | 11 +-
apps/sonda/docker-compose.yml | 2 +-
apps/sonda/register_rln.sh | 2 +-
scripts/install_anvil.sh | 19 +-
scripts/install_pnpm.sh | 8 +
scripts/install_rln_tests_dependencies.sh | 7 +
.../test_rln_group_manager_onchain.nim | 204 ++++----
tests/waku_rln_relay/utils_onchain.nim | 447 +++++++++++++++---
tests/waku_store_sync/test_range_split.nim | 2 +-
vendor/waku-rlnv2-contract | 2 +-
waku/factory/networks_config.nim | 4 +-
.../group_manager/on_chain/group_manager.nim | 213 +++++++--
12 files changed, 687 insertions(+), 234 deletions(-)
create mode 100755 scripts/install_pnpm.sh
create mode 100755 scripts/install_rln_tests_dependencies.sh
diff --git a/Makefile b/Makefile
index ae57852a5..d11f50bec 100644
--- a/Makefile
+++ b/Makefile
@@ -112,11 +112,8 @@ ifeq (, $(shell which cargo))
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
endif
-anvil: rustup
-ifeq (, $(shell which anvil 2> /dev/null))
-# Install Anvil if it's not installed
- ./scripts/install_anvil.sh
-endif
+rln-deps: rustup
+ ./scripts/install_rln_tests_dependencies.sh
deps: | deps-common nat-libs waku.nims
@@ -205,8 +202,8 @@ testcommon: | build deps
##########
.PHONY: testwaku wakunode2 testwakunode2 example2 chat2 chat2bridge liteprotocoltester
-# install anvil only for the testwaku target
-testwaku: | build deps anvil librln
+# install rln-deps only for the testwaku target
+testwaku: | build deps rln-deps librln
echo -e $(BUILD_MSG) "build/$@" && \
$(ENV_SCRIPT) nim test -d:os=$(shell uname) $(NIM_PARAMS) waku.nims
diff --git a/apps/sonda/docker-compose.yml b/apps/sonda/docker-compose.yml
index c6235ef32..d6594428e 100644
--- a/apps/sonda/docker-compose.yml
+++ b/apps/sonda/docker-compose.yml
@@ -9,7 +9,7 @@ x-logging: &logging
x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-"
x-rln-environment: &rln_env
- RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8}
+ RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
diff --git a/apps/sonda/register_rln.sh b/apps/sonda/register_rln.sh
index aca1007a8..4fb373b3a 100755
--- a/apps/sonda/register_rln.sh
+++ b/apps/sonda/register_rln.sh
@@ -24,7 +24,7 @@ fi
docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \
--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \
--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \
---rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \
+--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \
--rln-relay-cred-path=/keystore/keystore.json \
--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \
--rln-relay-user-message-limit=20 \
diff --git a/scripts/install_anvil.sh b/scripts/install_anvil.sh
index 13d5f8dfd..1bf4bd7b1 100755
--- a/scripts/install_anvil.sh
+++ b/scripts/install_anvil.sh
@@ -2,13 +2,14 @@
# Install Anvil
+if ! command -v anvil &> /dev/null; then
+ BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
+ FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}"
+ FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
-BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
-FOUNDRY_DIR="${FOUNDRY_DIR-"$BASE_DIR/.foundry"}"
-FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
-
-curl -L https://foundry.paradigm.xyz | bash
-# Extract the source path from the download result
-echo "foundryup_path: $FOUNDRY_BIN_DIR"
-# run foundryup
-$FOUNDRY_BIN_DIR/foundryup
\ No newline at end of file
+ curl -L https://foundry.paradigm.xyz | bash
+ # Extract the source path from the download result
+ echo "foundryup_path: $FOUNDRY_BIN_DIR"
+ # run foundryup
+ $FOUNDRY_BIN_DIR/foundryup
+fi
\ No newline at end of file
diff --git a/scripts/install_pnpm.sh b/scripts/install_pnpm.sh
new file mode 100755
index 000000000..34ba47b07
--- /dev/null
+++ b/scripts/install_pnpm.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# Install pnpm
+if ! command -v pnpm &> /dev/null; then
+ echo "pnpm is not installed, installing it now..."
+ npm i pnpm --global
+fi
+
diff --git a/scripts/install_rln_tests_dependencies.sh b/scripts/install_rln_tests_dependencies.sh
new file mode 100755
index 000000000..e19e0ef3c
--- /dev/null
+++ b/scripts/install_rln_tests_dependencies.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+# Install Anvil
+./scripts/install_anvil.sh
+
+# Install pnpm
+./scripts/install_pnpm.sh
\ No newline at end of file
diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
index b19d15030..3de48a738 100644
--- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
+++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim
@@ -3,7 +3,7 @@
{.push raises: [].}
import
- std/[options, sequtils, deques, random],
+ std/[options, sequtils, deques, random, locks],
results,
stew/byteutils,
testutils/unittests,
@@ -28,58 +28,71 @@ import
../testlib/wakucore,
./utils_onchain
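+# Serialize the onchain group manager tests: each test starts and stops its own Anvil instance,
+# so they must not run concurrently.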
+var testLock: Lock
+initLock(testLock)
+
suite "Onchain group manager":
- # We run Anvil
- let runAnvil {.used.} = runAnvil()
+ setup:
+ # Acquire lock to ensure tests run sequentially
+ acquire(testLock)
- var manager {.threadvar.}: OnchainGroupManager
+ let runAnvil {.used.} = runAnvil()
- asyncSetup:
- manager = await setupOnchainGroupManager()
+ var manager {.threadvar.}: OnchainGroupManager
+ manager = waitFor setupOnchainGroupManager()
- asyncTeardown:
- await manager.stop()
+ teardown:
+ waitFor manager.stop()
+ stopAnvil(runAnvil)
+ # Release lock after test completes
+ release(testLock)
- asyncTest "should initialize successfully":
- (await manager.init()).isOkOr:
+ test "should initialize successfully":
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
check:
manager.ethRpc.isSome()
manager.wakuRlnContract.isSome()
manager.initialized
- manager.rlnRelayMaxMessageLimit == 100
+ manager.rlnRelayMaxMessageLimit == 600
- asyncTest "should error on initialization when chainId does not match":
+ test "should error on initialization when chainId does not match":
manager.chainId = utils_onchain.CHAIN_ID + 1
- (await manager.init()).isErrOr:
+ (waitFor manager.init()).isErrOr:
raiseAssert "Expected error when chainId does not match"
- asyncTest "should initialize when chainId is set to 0":
+ test "should initialize when chainId is set to 0":
manager.chainId = 0x0'u256
-
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
- asyncTest "should error on initialization when loaded metadata does not match":
- (await manager.init()).isOkOr:
+ test "should error on initialization when loaded metadata does not match":
+ (waitFor manager.init()).isOkOr:
assert false, $error
-
let metadataSetRes = manager.setMetadata()
assert metadataSetRes.isOk(), metadataSetRes.error
let metadataOpt = manager.rlnInstance.getMetadata().valueOr:
assert false, $error
return
-
assert metadataOpt.isSome(), "metadata is not set"
let metadata = metadataOpt.get()
-
- assert metadata.chainId == 1337, "chainId is not equal to 1337"
+ assert metadata.chainId == 1234, "chainId is not equal to 1234"
assert metadata.contractAddress == manager.ethContractAddress,
"contractAddress is not equal to " & manager.ethContractAddress
-
- let differentContractAddress = await uploadRLNContract(manager.ethClientUrls[0])
+ let web3 = manager.ethRpc.get()
+ let accounts = waitFor web3.provider.eth_accounts()
+ web3.defaultAccount = accounts[2]
+ let (privateKey, acc) = createEthAccount(web3)
+ let tokenAddress = (waitFor deployTestToken(privateKey, acc, web3)).valueOr:
+ assert false, "Failed to deploy test token contract: " & $error
+ return
+ let differentContractAddress = (
+ waitFor executeForgeContractDeployScripts(privateKey, acc, web3)
+ ).valueOr:
+ assert false, "Failed to deploy RLN contract: " & $error
+ return
# simulating a change in the contractAddress
let manager2 = OnchainGroupManager(
ethClientUrls: @[EthClient],
@@ -89,52 +102,47 @@ suite "Onchain group manager":
assert false, errStr
,
)
- let e = await manager2.init()
+ let e = waitFor manager2.init()
(e).isErrOr:
assert false, "Expected error when contract address doesn't match"
- asyncTest "should error if contract does not exist":
+ test "should error if contract does not exist":
manager.ethContractAddress = "0x0000000000000000000000000000000000000000"
- var triggeredError = false
- try:
- discard await manager.init()
- except CatchableError:
- triggeredError = true
+ (waitFor manager.init()).isErrOr:
+ raiseAssert "Expected error when contract address doesn't exist"
- check triggeredError
-
- asyncTest "should error when keystore path and password are provided but file doesn't exist":
+ test "should error when keystore path and password are provided but file doesn't exist":
manager.keystorePath = some("/inexistent/file")
manager.keystorePassword = some("password")
- (await manager.init()).isErrOr:
+ (waitFor manager.init()).isErrOr:
raiseAssert "Expected error when keystore file doesn't exist"
- asyncTest "trackRootChanges: start tracking roots":
- (await manager.init()).isOkOr:
+ test "trackRootChanges: start tracking roots":
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
discard manager.trackRootChanges()
- asyncTest "trackRootChanges: should guard against uninitialized state":
+ test "trackRootChanges: should guard against uninitialized state":
try:
discard manager.trackRootChanges()
except CatchableError:
check getCurrentExceptionMsg().len == 38
- asyncTest "trackRootChanges: should sync to the state of the group":
+ test "trackRootChanges: should sync to the state of the group":
let credentials = generateCredentials(manager.rlnInstance)
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
let merkleRootBefore = manager.fetchMerkleRoot()
try:
- await manager.register(credentials, UserMessageLimit(1))
+ waitFor manager.register(credentials, UserMessageLimit(20))
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- discard await withTimeout(trackRootChanges(manager), 15.seconds)
+ discard waitFor withTimeout(trackRootChanges(manager), 15.seconds)
let merkleRootAfter = manager.fetchMerkleRoot()
@@ -151,7 +159,7 @@ suite "Onchain group manager":
metadata.validRoots == manager.validRoots.toSeq()
merkleRootBefore != merkleRootAfter
- asyncTest "trackRootChanges: should fetch history correctly":
+ test "trackRootChanges: should fetch history correctly":
# TODO: We can't use `trackRootChanges()` directly in this test because its current implementation
# relies on a busy loop rather than event-based monitoring. As a result, some root changes
# may be missed, leading to inconsistent test results (i.e., it may randomly return true or false).
@@ -159,15 +167,16 @@ suite "Onchain group manager":
# after each registration.
const credentialCount = 6
let credentials = generateCredentials(manager.rlnInstance, credentialCount)
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
let merkleRootBefore = manager.fetchMerkleRoot()
try:
for i in 0 ..< credentials.len():
- await manager.register(credentials[i], UserMessageLimit(1))
- discard await manager.updateRoots()
+ debug "Registering credential", index = i, credential = credentials[i]
+ waitFor manager.register(credentials[i], UserMessageLimit(20))
+ discard waitFor manager.updateRoots()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
@@ -177,13 +186,13 @@ suite "Onchain group manager":
merkleRootBefore != merkleRootAfter
manager.validRoots.len() == credentialCount
- asyncTest "register: should guard against uninitialized state":
+ test "register: should guard against uninitialized state":
let dummyCommitment = default(IDCommitment)
try:
- await manager.register(
+ waitFor manager.register(
RateCommitment(
- idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(1)
+ idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(20)
)
)
except CatchableError:
@@ -191,18 +200,18 @@ suite "Onchain group manager":
except Exception:
assert false, "exception raised: " & getCurrentExceptionMsg()
- asyncTest "register: should register successfully":
+ test "register: should register successfully":
# TODO :- similar to ```trackRootChanges: should fetch history correctly```
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
let merkleRootBefore = manager.fetchMerkleRoot()
try:
- await manager.register(
+ waitFor manager.register(
RateCommitment(
- idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
+ idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20)
)
)
except Exception, CatchableError:
@@ -215,47 +224,47 @@ suite "Onchain group manager":
merkleRootAfter != merkleRootBefore
manager.latestIndex == 1
- asyncTest "register: callback is called":
+ test "register: callback is called":
let idCredentials = generateCredentials(manager.rlnInstance)
let idCommitment = idCredentials.idCommitment
let fut = newFuture[void]()
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
- let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)).get()
+ let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(20)).get()
check:
registrations.len == 1
registrations[0].rateCommitment == rateCommitment
registrations[0].index == 0
fut.complete()
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
manager.onRegister(callback)
try:
- await manager.register(
+ waitFor manager.register(
RateCommitment(
- idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1)
+ idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20)
)
)
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await fut
+ waitFor fut
- asyncTest "withdraw: should guard against uninitialized state":
+ test "withdraw: should guard against uninitialized state":
let idSecretHash = generateCredentials(manager.rlnInstance).idSecretHash
try:
- await manager.withdraw(idSecretHash)
+ waitFor manager.withdraw(idSecretHash)
except CatchableError:
assert true
except Exception:
assert false, "exception raised: " & getCurrentExceptionMsg()
- asyncTest "validateRoot: should validate good root":
+ test "validateRoot: should validate good root":
let idCredentials = generateCredentials(manager.rlnInstance)
let idCommitment = idCredentials.idCommitment
@@ -264,27 +273,27 @@ suite "Onchain group manager":
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
if registrations.len == 1 and
registrations[0].rateCommitment ==
- getRateCommitment(idCredentials, UserMessageLimit(1)).get() and
+ getRateCommitment(idCredentials, UserMessageLimit(20)).get() and
registrations[0].index == 0:
manager.idCredentials = some(idCredentials)
fut.complete()
manager.onRegister(callback)
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
try:
- await manager.register(idCredentials, UserMessageLimit(1))
+ waitFor manager.register(idCredentials, UserMessageLimit(20))
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await fut
+ waitFor fut
- let rootUpdated = await manager.updateRoots()
+ let rootUpdated = waitFor manager.updateRoots()
if rootUpdated:
- let proofResult = await manager.fetchMerkleProofElements()
+ let proofResult = waitFor manager.fetchMerkleProofElements()
if proofResult.isErr():
error "Failed to fetch Merkle proof", error = proofResult.error
manager.merkleProofCache = proofResult.get()
@@ -306,14 +315,14 @@ suite "Onchain group manager":
check:
validated
- asyncTest "validateRoot: should reject bad root":
+ test "validateRoot: should reject bad root":
let idCredentials = generateCredentials(manager.rlnInstance)
let idCommitment = idCredentials.idCommitment
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
- manager.userMessageLimit = some(UserMessageLimit(1))
+ manager.userMessageLimit = some(UserMessageLimit(20))
manager.membershipIndex = some(MembershipIndex(0))
manager.idCredentials = some(idCredentials)
@@ -339,9 +348,9 @@ suite "Onchain group manager":
check:
validated == false
- asyncTest "verifyProof: should verify valid proof":
+ test "verifyProof: should verify valid proof":
let credentials = generateCredentials(manager.rlnInstance)
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
let fut = newFuture[void]()
@@ -349,7 +358,7 @@ suite "Onchain group manager":
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
if registrations.len == 1 and
registrations[0].rateCommitment ==
- getRateCommitment(credentials, UserMessageLimit(1)).get() and
+ getRateCommitment(credentials, UserMessageLimit(20)).get() and
registrations[0].index == 0:
manager.idCredentials = some(credentials)
fut.complete()
@@ -357,15 +366,15 @@ suite "Onchain group manager":
manager.onRegister(callback)
try:
- await manager.register(credentials, UserMessageLimit(1))
+ waitFor manager.register(credentials, UserMessageLimit(20))
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await fut
+ waitFor fut
- let rootUpdated = await manager.updateRoots()
+ let rootUpdated = waitFor manager.updateRoots()
if rootUpdated:
- let proofResult = await manager.fetchMerkleProofElements()
+ let proofResult = waitFor manager.fetchMerkleProofElements()
if proofResult.isErr():
error "Failed to fetch Merkle proof", error = proofResult.error
manager.merkleProofCache = proofResult.get()
@@ -388,21 +397,21 @@ suite "Onchain group manager":
check:
verified
- asyncTest "verifyProof: should reject invalid proof":
- (await manager.init()).isOkOr:
+ test "verifyProof: should reject invalid proof":
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
let idCredential = generateCredentials(manager.rlnInstance)
try:
- await manager.register(idCredential, UserMessageLimit(1))
+ waitFor manager.register(idCredential, UserMessageLimit(20))
except Exception, CatchableError:
assert false,
"exception raised when calling startGroupSync: " & getCurrentExceptionMsg()
let messageBytes = "Hello".toBytes()
- let rootUpdated = await manager.updateRoots()
+ let rootUpdated = waitFor manager.updateRoots()
manager.merkleProofCache = newSeq[byte](640)
for i in 0 ..< 640:
@@ -427,10 +436,10 @@ suite "Onchain group manager":
check:
verified == false
- asyncTest "root queue should be updated correctly":
+ test "root queue should be updated correctly":
const credentialCount = 12
let credentials = generateCredentials(manager.rlnInstance, credentialCount)
- (await manager.init()).isOkOr:
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
type TestBackfillFuts = array[0 .. credentialCount - 1, Future[void]]
@@ -445,7 +454,7 @@ suite "Onchain group manager":
proc callback(registrations: seq[Membership]): Future[void] {.async.} =
if registrations.len == 1 and
registrations[0].rateCommitment ==
- getRateCommitment(credentials[futureIndex], UserMessageLimit(1)).get() and
+ getRateCommitment(credentials[futureIndex], UserMessageLimit(20)).get() and
registrations[0].index == MembershipIndex(futureIndex):
futs[futureIndex].complete()
futureIndex += 1
@@ -456,47 +465,40 @@ suite "Onchain group manager":
manager.onRegister(generateCallback(futures, credentials))
for i in 0 ..< credentials.len():
- await manager.register(credentials[i], UserMessageLimit(1))
- discard await manager.updateRoots()
+ waitFor manager.register(credentials[i], UserMessageLimit(20))
+ discard waitFor manager.updateRoots()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
- await allFutures(futures)
+ waitFor allFutures(futures)
check:
manager.validRoots.len() == credentialCount
- asyncTest "isReady should return false if ethRpc is none":
- (await manager.init()).isOkOr:
+ test "isReady should return false if ethRpc is none":
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
manager.ethRpc = none(Web3)
var isReady = true
try:
- isReady = await manager.isReady()
+ isReady = waitFor manager.isReady()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
check:
isReady == false
- asyncTest "isReady should return true if ethRpc is ready":
- (await manager.init()).isOkOr:
+ test "isReady should return true if ethRpc is ready":
+ (waitFor manager.init()).isOkOr:
raiseAssert $error
var isReady = false
try:
- isReady = await manager.isReady()
+ isReady = waitFor manager.isReady()
except Exception, CatchableError:
assert false, "exception raised: " & getCurrentExceptionMsg()
check:
isReady == true
-
- ################################
- ## Terminating/removing Anvil
- ################################
-
- # We stop Anvil daemon
- stopAnvil(runAnvil)
diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim
index 0c7fcce26..9066b0292 100644
--- a/tests/waku_rln_relay/utils_onchain.nim
+++ b/tests/waku_rln_relay/utils_onchain.nim
@@ -30,7 +30,7 @@ import
../testlib/common,
./utils
-const CHAIN_ID* = 1337'u256
+const CHAIN_ID* = 1234'u256
template skip0xPrefix(hexStr: string): int =
## Returns the index of the first meaningful char in `hexStr` by skipping
@@ -61,64 +61,347 @@ proc generateCredentials*(rlnInstance: ptr RLN, n: int): seq[IdentityCredential]
credentials.add(generateCredentials(rlnInstance))
return credentials
-# a util function used for testing purposes
-# it deploys membership contract on Anvil (or any Eth client available on EthClient address)
-# must be edited if used for a different contract than membership contract
-#
-proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} =
- let web3 = await newWeb3(ethClientAddress)
- debug "web3 connected to", ethClientAddress
+proc getContractAddressFromDeployScriptOutput(output: string): Result[string, string] =
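+ # The forge deploy scripts print a "Return ==" block whose first entry looks like
+ # "0: address 0x<40 hex chars>"; slice that address out of the script output.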
+ const searchStr = "Return ==\n0: address "
+ const addressLength = 42 # Length of an Ethereum address in hex format
+ let idx = output.find(searchStr)
+ if idx >= 0:
+ let startPos = idx + searchStr.len
+ let endPos = output.find('\n', startPos)
+ if (endPos - startPos) >= addressLength:
+ let address = output[startPos ..< endPos]
+ return ok(address)
+ return err("Unable to find contract address in deploy script output")
- # fetch the list of registered accounts
- let accounts = await web3.provider.eth_accounts()
- web3.defaultAccount = accounts[1]
- let add = web3.defaultAccount
- debug "contract deployer account address ", add
+proc getForgePath(): string =
+ var forgePath = ""
+ if existsEnv("XDG_CONFIG_HOME"):
+ forgePath = joinPath(forgePath, os.getEnv("XDG_CONFIG_HOME", ""))
+ else:
+ forgePath = joinPath(forgePath, os.getEnv("HOME", ""))
+ forgePath = joinPath(forgePath, ".foundry/bin/forge")
+ return forgePath
- let balance =
- await web3.provider.eth_getBalance(web3.defaultAccount, blockId("latest"))
- debug "Initial account balance: ", balance
+contract(ERC20Token):
+ proc allowance(owner: Address, spender: Address): UInt256 {.view.}
+ proc balanceOf(account: Address): UInt256 {.view.}
- # deploy poseidon hasher bytecode
- let poseidonT3Receipt = await web3.deployContract(PoseidonT3)
- let poseidonT3Address = poseidonT3Receipt.contractAddress.get()
- let poseidonAddressStripped = strip0xPrefix($poseidonT3Address)
+proc getTokenBalance(
+ web3: Web3, tokenAddress: Address, account: Address
+): Future[UInt256] {.async.} =
+ let token = web3.contractSender(ERC20Token, tokenAddress)
+ return await token.balanceOf(account).call()
- # deploy lazy imt bytecode
- let lazyImtReceipt = await web3.deployContract(
- LazyIMT.replace("__$PoseidonT3$__", poseidonAddressStripped)
- )
- let lazyImtAddress = lazyImtReceipt.contractAddress.get()
- let lazyImtAddressStripped = strip0xPrefix($lazyImtAddress)
+proc ethToWei(eth: UInt256): UInt256 =
+ eth * 1000000000000000000.u256
- # deploy waku rlnv2 contract
- let wakuRlnContractReceipt = await web3.deployContract(
- WakuRlnV2Contract.replace("__$PoseidonT3$__", poseidonAddressStripped).replace(
- "__$LazyIMT$__", lazyImtAddressStripped
+proc sendMintCall(
+ web3: Web3,
+ accountFrom: Address,
+ tokenAddress: Address,
+ recipientAddress: Address,
+ amountTokens: UInt256,
+ recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256),
+): Future[TxHash] {.async.} =
+ let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome()
+
+ if doBalanceAssert:
+ let balanceBeforeMint = await getTokenBalance(web3, tokenAddress, recipientAddress)
+ let balanceBeforeExpectedTokens = recipientBalanceBeforeExpectedTokens.get()
+ assert balanceBeforeMint == balanceBeforeExpectedTokens,
+ fmt"Balance is {balanceBeforeMint} before minting but expected {balanceBeforeExpectedTokens}"
+
+ # Create mint transaction
+ # Method ID for mint(address,uint256) is 0x40c10f19 which is part of the openzeppelin ERC20 standard
+ # The method ID for a deployed test token can be viewed here https://sepolia.lineascan.build/address/0x185A0015aC462a0aECb81beCc0497b649a64B9ea#writeContract
+ let mintSelector = "0x40c10f19"
+ let addressHex = recipientAddress.toHex()
+ # Pad the address and amount to 32 bytes each
+ let paddedAddress = addressHex.align(64, '0')
+
+ let amountHex = amountTokens.toHex()
+ let amountWithout0x =
+ if amountHex.toLower().startsWith("0x"):
+ amountHex[2 .. ^1]
+ else:
+ amountHex
+ let paddedAmount = amountWithout0x.align(64, '0')
+ let mintCallData = mintSelector & paddedAddress & paddedAmount
+ let gasPrice = int(await web3.provider.eth_gasPrice())
+
+ # Create the transaction
+ var tx: TransactionArgs
+ tx.`from` = Opt.some(accountFrom)
+ tx.to = Opt.some(tokenAddress)
+ tx.value = Opt.some(0.u256) # No ETH is sent for token operations
+ tx.gasPrice = Opt.some(Quantity(gasPrice))
+ tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData))
+
+ trace "Sending mint call"
+ let txHash = await web3.send(tx)
+
+ let balanceOfSelector = "0x70a08231"
+ let balanceCallData = balanceOfSelector & paddedAddress
+
+ # Wait a bit for transaction to be mined
+ await sleepAsync(500.milliseconds)
+
+ if doBalanceAssert:
+ let balanceAfterMint = await getTokenBalance(web3, tokenAddress, recipientAddress)
+ let balanceAfterExpectedTokens =
+ recipientBalanceBeforeExpectedTokens.get() + amountTokens
+ assert balanceAfterMint == balanceAfterExpectedTokens,
+ fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}"
+
+ return txHash
+
+# Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership)
+proc checkTokenAllowance(
+ web3: Web3, tokenAddress: Address, owner: Address, spender: Address
+): Future[UInt256] {.async.} =
+ let token = web3.contractSender(ERC20Token, tokenAddress)
+ let allowance = await token.allowance(owner, spender).call()
+ trace "Current allowance", owner = owner, spender = spender, allowance = allowance
+ return allowance
+
+proc setupContractDeployment(
+ forgePath: string, submodulePath: string
+): Result[void, string] =
+ trace "Contract deployer paths", forgePath = forgePath, submodulePath = submodulePath
+ # Build the Foundry project
+ try:
+ let (forgeCleanOutput, forgeCleanExitCode) =
+ execCmdEx(fmt"""cd {submodulePath} && {forgePath} clean""")
+ trace "Executed forge clean command", output = forgeCleanOutput
+ if forgeCleanExitCode != 0:
+ return err("forge clean command failed")
+
+ let (forgeInstallOutput, forgeInstallExitCode) =
+ execCmdEx(fmt"""cd {submodulePath} && {forgePath} install""")
+ trace "Executed forge install command", output = forgeInstallOutput
+ if forgeInstallExitCode != 0:
+ return err("forge install command failed")
+
+ let (pnpmInstallOutput, pnpmInstallExitCode) =
+ execCmdEx(fmt"""cd {submodulePath} && pnpm install""")
+ trace "Executed pnpm install command", output = pnpmInstallOutput
+ if pnpmInstallExitCode != 0:
+ return err("pnpm install command failed" & pnpmInstallOutput)
+
+ let (forgeBuildOutput, forgeBuildExitCode) =
+ execCmdEx(fmt"""cd {submodulePath} && {forgePath} build""")
+ trace "Executed forge build command", output = forgeBuildOutput
+ if forgeBuildExitCode != 0:
+ return err("forge build command failed")
+
+ # Set the environment variable API keys to anything for local testnet deployment
+ putEnv("API_KEY_CARDONA", "123")
+ putEnv("API_KEY_LINEASCAN", "123")
+ putEnv("API_KEY_ETHERSCAN", "123")
+ except OSError, IOError:
+ return err("Command execution failed: " & getCurrentExceptionMsg())
+ return ok()
+
+proc deployTestToken*(
+ pk: keys.PrivateKey, acc: Address, web3: Web3
+): Future[Result[Address, string]] {.async.} =
+ ## Executes a Foundry forge script that deploys the ERC-20 test token used by the RLN tests.
+ ## Deploying it is a prerequisite for the RLN contract deployment: the token must later be minted
+ ## to, and approved by, every account that registers a membership with the contract.
+ ## The deploy scripts are expected in the ./vendor/waku-rlnv2-contract submodule.
+
+ # All RLN related tests should be run from the root directory of the project
+ let submodulePath = absolutePath("./vendor/waku-rlnv2-contract")
+
+ # Verify submodule path exists
+ if not dirExists(submodulePath):
+ error "Submodule path does not exist", submodulePath = submodulePath
+ return err("Submodule path does not exist: " & submodulePath)
+
+ let forgePath = getForgePath()
+
+ setupContractDeployment(forgePath, submodulePath).isOkOr:
+ error "Failed to setup contract deployment", error = $error
+ return err("Failed to setup contract deployment: " & $error)
+
+ # Deploy TestToken contract
+ let forgeCmdTestToken =
+ fmt"""cd {submodulePath} && {forgePath} script test/TestToken.sol --broadcast -vvv --rpc-url http://localhost:8540 --tc TestTokenFactory --private-key {pk} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json"""
+ let (outputDeployTestToken, exitCodeDeployTestToken) = execCmdEx(forgeCmdTestToken)
+ trace "Executed forge command to deploy TestToken contract",
+ output = outputDeployTestToken
+ if exitCodeDeployTestToken != 0:
+ return error("Forge command to deploy TestToken contract failed")
+
+ # Parse the command output to find contract address
+ let testTokenAddress = getContractAddressFromDeployScriptOutput(outputDeployTestToken).valueOr:
+ error "Failed to get TestToken contract address from deploy script output",
+ error = $error
+ return err(
+ "Failed to get TestToken contract address from deploy script output: " & $error
)
- )
- let wakuRlnContractAddress = wakuRlnContractReceipt.contractAddress.get()
- let wakuRlnAddressStripped = strip0xPrefix($wakuRlnContractAddress)
+ debug "Address of the TestToken contract", testTokenAddress
- debug "Address of the deployed rlnv2 contract: ", wakuRlnContractAddress
+ let testTokenAddressBytes = hexToByteArray[20](testTokenAddress)
+ let testTokenAddressAddress = Address(testTokenAddressBytes)
+ putEnv("TOKEN_ADDRESS", testTokenAddressAddress.toHex())
- # need to send concat: impl & init_bytes
- let contractInput =
- byteutils.toHex(encode(wakuRlnContractAddress)) & Erc1967ProxyContractInput
- debug "contractInput", contractInput
- let proxyReceipt =
- await web3.deployContract(Erc1967Proxy, contractInput = contractInput)
+ return ok(testTokenAddressAddress)
- debug "proxy receipt", contractAddress = proxyReceipt.contractAddress.get()
- let proxyAddress = proxyReceipt.contractAddress.get()
+# Sends an ERC20 token approval call to allow a spender to spend a certain amount of tokens on behalf of the owner
+proc approveTokenAllowanceAndVerify*(
+ web3: Web3,
+ accountFrom: Address,
+ privateKey: keys.PrivateKey,
+ tokenAddress: Address,
+ spender: Address,
+ amountWei: UInt256,
+ expectedAllowanceBefore: Option[UInt256] = none(UInt256),
+): Future[Result[TxHash, string]] {.async.} =
+ var allowanceBefore: UInt256
+ if expectedAllowanceBefore.isSome():
+ allowanceBefore =
+ await checkTokenAllowance(web3, tokenAddress, accountFrom, spender)
+ let expected = expectedAllowanceBefore.get()
+ if allowanceBefore != expected:
+ return
+ err(fmt"Allowance is {allowanceBefore} before approval but expected {expected}")
- let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest")
- debug "Account balance after the contract deployment: ", newBalance
+ # Temporarily set the private key
+ let oldPrivateKey = web3.privateKey
+ web3.privateKey = Opt.some(privateKey)
+ web3.lastKnownNonce = Opt.none(Quantity)
+
+ try:
+ # ERC20 approve function signature: approve(address spender, uint256 amount)
+ # Method ID for approve(address,uint256) is 0x095ea7b3
+ const APPROVE_SELECTOR = "0x095ea7b3"
+ let addressHex = spender.toHex().align(64, '0')
+ let amountHex = amountWei.toHex().align(64, '0')
+ let approveCallData = APPROVE_SELECTOR & addressHex & amountHex
+
+ let gasPrice = await web3.provider.eth_gasPrice()
+
+ var tx: TransactionArgs
+ tx.`from` = Opt.some(accountFrom)
+ tx.to = Opt.some(tokenAddress)
+ tx.value = Opt.some(0.u256)
+ tx.gasPrice = Opt.some(gasPrice)
+ tx.gas = Opt.some(Quantity(100000))
+ tx.data = Opt.some(byteutils.hexToSeqByte(approveCallData))
+ tx.chainId = Opt.some(CHAIN_ID)
+
+ trace "Sending approve call", tx = tx
+ let txHash = await web3.send(tx)
+ let receipt = await web3.getMinedTransactionReceipt(txHash)
+
+ if receipt.status.isNone():
+ return err("Approval transaction failed receipt is none")
+ if receipt.status.get() != 1.Quantity:
+ return err("Approval transaction failed status quantity not 1")
+
+ # Single verification check after mining (no extra sleep needed)
+ let allowanceAfter =
+ await checkTokenAllowance(web3, tokenAddress, accountFrom, spender)
+ let expectedAfter =
+ if expectedAllowanceBefore.isSome():
+ expectedAllowanceBefore.get() + amountWei
+ else:
+ amountWei
+
+ if allowanceAfter < expectedAfter:
+ return err(
+ fmt"Allowance is {allowanceAfter} after approval but expected at least {expectedAfter}"
+ )
+
+ return ok(txHash)
+ except CatchableError as e:
+ return err(fmt"Failed to send approve transaction: {e.msg}")
+ finally:
+ # Restore the old private key
+ web3.privateKey = oldPrivateKey
+
+proc executeForgeContractDeployScripts*(
+ privateKey: keys.PrivateKey, acc: Address, web3: Web3
+): Future[Result[Address, string]] {.async, gcsafe.} =
+ ## Executes a set of foundry forge scripts required to deploy the RLN contract and returns the deployed proxy contract address
+ ## The deploy scripts are expected in the ./vendor/waku-rlnv2-contract submodule.
+
+ # All RLN related tests should be run from the root directory of the project
+ let submodulePath = "./vendor/waku-rlnv2-contract"
+
+ # Verify submodule path exists
+ if not dirExists(submodulePath):
+ error "Submodule path does not exist", submodulePath = submodulePath
+ return err("Submodule path does not exist: " & submodulePath)
+
+ let forgePath = getForgePath()
+ debug "Forge path", forgePath
+
+ # Verify forge executable exists
+ if not fileExists(forgePath):
+ error "Forge executable not found", forgePath = forgePath
+ return err("Forge executable not found: " & forgePath)
+
+ trace "contract deployer account details", account = acc, privateKey = privateKey
+ let setupContractEnv = setupContractDeployment(forgePath, submodulePath)
+ if setupContractEnv.isErr():
+ error "Failed to setup contract deployment"
+ return err("Failed to setup contract deployment")
+
+ # Deploy LinearPriceCalculator contract
+ let forgeCmdPriceCalculator =
+ fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployPriceCalculator --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json"""
+ let (outputDeployPriceCalculator, exitCodeDeployPriceCalculator) =
+ execCmdEx(forgeCmdPriceCalculator)
+ trace "Executed forge command to deploy LinearPriceCalculator contract",
+ output = outputDeployPriceCalculator
+ if exitCodeDeployPriceCalculator != 0:
+    return err("Forge command to deploy LinearPriceCalculator contract failed")
+
+ # Parse the output to find contract address
+ let priceCalculatorAddressRes =
+ getContractAddressFromDeployScriptOutput(outputDeployPriceCalculator)
+  if priceCalculatorAddressRes.isErr():
+    error "Failed to get LinearPriceCalculator contract address from deploy script output"
+    return err("Failed to get LinearPriceCalculator contract address from deploy script output")
+  let priceCalculatorAddress = priceCalculatorAddressRes.get()
+ debug "Address of the LinearPriceCalculator contract", priceCalculatorAddress
+ putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress)
+
+ let forgeCmdWakuRln =
+ fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployWakuRlnV2 --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json"""
+ let (outputDeployWakuRln, exitCodeDeployWakuRln) = execCmdEx(forgeCmdWakuRln)
+ trace "Executed forge command to deploy WakuRlnV2 contract",
+ output = outputDeployWakuRln
+  if exitCodeDeployWakuRln != 0:
+    error "Forge command to deploy WakuRlnV2 contract failed",
+      output = outputDeployWakuRln
+    return err("Forge command to deploy WakuRlnV2 contract failed")
+
+ # Parse the output to find contract address
+ let wakuRlnV2AddressRes =
+ getContractAddressFromDeployScriptOutput(outputDeployWakuRln)
+  if wakuRlnV2AddressRes.isErr():
+    error "Failed to get WakuRlnV2 contract address from deploy script output"
+    return err("Failed to get WakuRlnV2 contract address from deploy script output")
+ let wakuRlnV2Address = wakuRlnV2AddressRes.get()
+ debug "Address of the WakuRlnV2 contract", wakuRlnV2Address
+ putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address)
+
+ # Deploy Proxy contract
+ let forgeCmdProxy =
+ fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployProxy --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json"""
+ let (outputDeployProxy, exitCodeDeployProxy) = execCmdEx(forgeCmdProxy)
+ trace "Executed forge command to deploy proxy contract", output = outputDeployProxy
+ if exitCodeDeployProxy != 0:
+ error "Forge command to deploy Proxy failed", error = outputDeployProxy
+ return err("Forge command to deploy Proxy failed")
+
+  let proxyAddress = getContractAddressFromDeployScriptOutput(outputDeployProxy).valueOr:
+    return err("Failed to get Proxy contract address from deploy script output")
+  let proxyAddressBytes = hexToByteArray[20](proxyAddress)
+ let proxyAddressAddress = Address(proxyAddressBytes)
+
+ info "Address of the Proxy contract", proxyAddressAddress
await web3.close()
- debug "disconnected from ", ethClientAddress
-
- return proxyAddress
+ return ok(proxyAddressAddress)
proc sendEthTransfer*(
web3: Web3,
@@ -133,7 +416,7 @@ proc sendEthTransfer*(
let balanceBeforeWei = await web3.provider.eth_getBalance(accountTo, "latest")
let balanceBeforeExpectedWei = accountToBalanceBeforeExpectedWei.get()
assert balanceBeforeWei == balanceBeforeExpectedWei,
- fmt"Balance is {balanceBeforeWei} but expected {balanceBeforeExpectedWei}"
+ fmt"Balance is {balanceBeforeWei} before transfer but expected {balanceBeforeExpectedWei}"
let gasPrice = int(await web3.provider.eth_gasPrice())
@@ -146,17 +429,17 @@ proc sendEthTransfer*(
# TODO: handle the error if sending fails
let txHash = await web3.send(tx)
+ # Wait a bit for transaction to be mined
+ await sleepAsync(200.milliseconds)
+
if doBalanceAssert:
let balanceAfterWei = await web3.provider.eth_getBalance(accountTo, "latest")
let balanceAfterExpectedWei = accountToBalanceBeforeExpectedWei.get() + amountWei
assert balanceAfterWei == balanceAfterExpectedWei,
- fmt"Balance is {balanceAfterWei} but expected {balanceAfterExpectedWei}"
+ fmt"Balance is {balanceAfterWei} after transfer but expected {balanceAfterExpectedWei}"
return txHash
-proc ethToWei(eth: UInt256): UInt256 =
- eth * 1000000000000000000.u256
-
proc createEthAccount*(
ethAmount: UInt256 = 1000.u256
): Future[(keys.PrivateKey, Address)] {.async.} =
@@ -198,7 +481,7 @@ proc getAnvilPath*(): string =
return $anvilPath
# Runs Anvil daemon
-proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process =
+proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
# Passed options are
# --port Port to listen on.
# --gas-limit Sets the block gas limit in WEI.
@@ -212,13 +495,13 @@ proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process =
anvilPath,
args = [
"--port",
- "8540",
+ $port,
"--gas-limit",
"300000000000000",
"--balance",
"1000000000",
"--chain-id",
- $CHAIN_ID,
+ $chainId,
],
options = {poUsePath},
)
@@ -242,14 +525,26 @@ proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process =
# Stops Anvil daemon
proc stopAnvil*(runAnvil: Process) {.used.} =
+ if runAnvil.isNil:
+ debug "stopAnvil called with nil Process"
+ return
+
let anvilPID = runAnvil.processID
- # We wait the daemon to exit
+ debug "Stopping Anvil daemon", anvilPID = anvilPID
+
try:
- # We terminate Anvil daemon by sending a SIGTERM signal to the runAnvil PID to trigger RPC server termination and clean-up
- kill(runAnvil)
- debug "Sent SIGTERM to Anvil", anvilPID = anvilPID
- except:
- error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg()
+ # Send termination signals
+ when not defined(windows):
+ discard execCmdEx(fmt"kill -TERM {anvilPID}")
+ discard execCmdEx(fmt"kill -9 {anvilPID}")
+ else:
+ discard execCmdEx(fmt"taskkill /F /PID {anvilPID}")
+
+ # Close Process object to release resources
+ close(runAnvil)
+ debug "Anvil daemon stopped", anvilPID = anvilPID
+ except Exception as e:
+ debug "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg
proc setupOnchainGroupManager*(
ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256
@@ -261,12 +556,10 @@ proc setupOnchainGroupManager*(
let rlnInstance = rlnInstanceRes.get()
- let contractAddress = await uploadRLNContract(ethClientUrl)
# connect to the eth client
let web3 = await newWeb3(ethClientUrl)
-
let accounts = await web3.provider.eth_accounts()
- web3.defaultAccount = accounts[0]
+ web3.defaultAccount = accounts[1]
let (privateKey, acc) = createEthAccount(web3)
@@ -276,6 +569,32 @@ proc setupOnchainGroupManager*(
web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256)
)
+ let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr:
+ assert false, "Failed to deploy test token contract: " & $error
+ return
+
+ # mint the token from the generated account
+ discard await sendMintCall(
+ web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256)
+ )
+
+ let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr:
+ assert false, "Failed to deploy RLN contract: " & $error
+ return
+
+ # If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens
+ let tokenApprovalResult = await approveTokenAllowanceAndVerify(
+ web3,
+ acc, # owner
+ privateKey,
+ testTokenAddress, # ERC20 token address
+ contractAddress, # spender - the proxy contract that will spend the tokens
+ ethToWei(200.u256),
+ some(0.u256), # expected allowance before approval
+ )
+
+ assert tokenApprovalResult.isOk, tokenApprovalResult.error()
+
let manager = OnchainGroupManager(
ethClientUrls: @[ethClientUrl],
ethContractAddress: $contractAddress,
diff --git a/tests/waku_store_sync/test_range_split.nim b/tests/waku_store_sync/test_range_split.nim
index cc09bdb9d..50ebc39fd 100644
--- a/tests/waku_store_sync/test_range_split.nim
+++ b/tests/waku_store_sync/test_range_split.nim
@@ -209,7 +209,7 @@ suite "Waku Sync – reconciliation":
baseHash = hashLocal
alteredHash = toDigest("msg" & $i & "_x")
hashRemote = alteredHash
-
+
remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr:
assert false, "failed to insert hash: " & $error
diff --git a/vendor/waku-rlnv2-contract b/vendor/waku-rlnv2-contract
index a576a8949..b7e9a9b1b 160000
--- a/vendor/waku-rlnv2-contract
+++ b/vendor/waku-rlnv2-contract
@@ -1 +1 @@
-Subproject commit a576a8949ca20e310f2fbb4ec0bd05a57ac3045f
+Subproject commit b7e9a9b1bc69256a2a3076c1f099b50ce84e7eff
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index 619a1a7c5..9d1da0ace 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -30,12 +30,12 @@ type ClusterConf* = object
# Cluster configuration corresponding to The Waku Network. Note that it
# overrides existing cli configuration
proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
- const RelayChainId = 11155111'u256
+ const RelayChainId = 59141'u256
return ClusterConf(
maxMessageSize: "150KiB",
clusterId: 1,
rlnRelay: true,
- rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
+ rlnRelayEthContractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6",
rlnRelayDynamic: true,
rlnRelayChainId: RelayChainId,
rlnEpochSizeSec: 600,
diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
index 54290a77a..4f2fb5228 100644
--- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
+++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim
@@ -30,23 +30,29 @@ logScope:
# using the when predicate does not work within the contract macro, hence need to dupe
contract(WakuRlnContract):
# this serves as an entrypoint into the rln membership set
- proc register(idCommitment: UInt256, userMessageLimit: UInt32)
+ proc register(
+ idCommitment: UInt256, userMessageLimit: UInt32, idCommitmentsToErase: seq[UInt256]
+ )
+
# Initializes the implementation contract (only used in unit tests)
proc initialize(maxMessageLimit: UInt256)
- # this event is raised when a new member is registered
- proc MemberRegistered(rateCommitment: UInt256, index: UInt32) {.event.}
+ # this event is emitted when a new member is registered
+ proc MembershipRegistered(
+ idCommitment: UInt256, membershipRateLimit: UInt256, index: UInt32
+ ) {.event.}
+
# this function denotes existence of a given user
- proc memberExists(idCommitment: UInt256): UInt256 {.view.}
+  proc isInMembershipSet(idCommitment: UInt256): bool {.view.}
# this constant describes the next index of a new member
- proc commitmentIndex(): UInt256 {.view.}
+ proc nextFreeIndex(): UInt256 {.view.}
# this constant describes the block number this contract was deployed on
proc deployedBlockNumber(): UInt256 {.view.}
# this constant describes max message limit of rln contract
- proc MAX_MESSAGE_LIMIT(): UInt256 {.view.}
- # this function returns the merkleProof for a given index
- # proc merkleProofElements(index: UInt40): seq[byte] {.view.}
- # this function returns the merkle root
- proc root(): UInt256 {.view.}
+ proc maxMembershipRateLimit(): UInt256 {.view.}
+ # this function returns the merkleProof for a given index
+ # proc getMerkleProof(index: EthereumUInt40): seq[array[32, byte]] {.view.}
+ # this function returns the Merkle root
+  proc root(): UInt256 {.view.}
type
WakuRlnContractWithSender = Sender[WakuRlnContract]
@@ -67,11 +73,7 @@ type
proc setMetadata*(
g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber)
): GroupManagerResult[void] =
- let normalizedBlock =
- if lastProcessedBlock.isSome():
- lastProcessedBlock.get()
- else:
- g.latestProcessedBlock
+ let normalizedBlock = lastProcessedBlock.get(g.latestProcessedBlock)
try:
let metadataSetRes = g.rlnInstance.setMetadata(
RlnMetadata(
@@ -87,14 +89,68 @@ proc setMetadata*(
return err("failed to persist rln metadata: " & getCurrentExceptionMsg())
return ok()
+proc sendEthCallWithChainId(
+ ethRpc: Web3,
+ functionSignature: string,
+ fromAddress: Address,
+ toAddress: Address,
+ chainId: UInt256,
+): Future[Result[UInt256, string]] {.async.} =
+ ## Workaround for web3 chainId=null issue on some networks (e.g., linea-sepolia)
+ ## Makes contract calls with explicit chainId for view functions with no parameters
+ let functionHash =
+ keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1))
+ let functionSelector = functionHash.data[0 .. 3]
+ let dataSignature = "0x" & functionSelector.mapIt(it.toHex(2)).join("")
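+  # e.g. for functionSignature "root()" the selector is the first 4 bytes of keccak256("root()");
+  # since the function takes no parameters, the selector alone is the complete calldata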
+
+ var tx: TransactionArgs
+ tx.`from` = Opt.some(fromAddress)
+ tx.to = Opt.some(toAddress)
+ tx.value = Opt.some(0.u256)
+ tx.data = Opt.some(byteutils.hexToSeqByte(dataSignature))
+ tx.chainId = Opt.some(chainId)
+
+ let resultBytes = await ethRpc.provider.eth_call(tx, "latest")
+ if resultBytes.len == 0:
+ return err("No result returned for function call: " & functionSignature)
+ return ok(UInt256.fromBytesBE(resultBytes))
+
+proc sendEthCallWithParams(
+ ethRpc: Web3,
+ functionSignature: string,
+ params: seq[byte],
+ fromAddress: Address,
+ toAddress: Address,
+ chainId: UInt256,
+): Future[Result[seq[byte], string]] {.async.} =
+ ## Workaround for web3 chainId=null issue with parameterized contract calls
+ let functionHash =
+ keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1))
+ let functionSelector = functionHash.data[0 .. 3]
+ let callData = functionSelector & params
+
+ var tx: TransactionArgs
+ tx.`from` = Opt.some(fromAddress)
+ tx.to = Opt.some(toAddress)
+ tx.value = Opt.some(0.u256)
+ tx.data = Opt.some(callData)
+ tx.chainId = Opt.some(chainId)
+
+ let resultBytes = await ethRpc.provider.eth_call(tx, "latest")
+ return ok(resultBytes)
+
proc fetchMerkleProofElements*(
g: OnchainGroupManager
): Future[Result[seq[byte], string]] {.async.} =
try:
+    # Calls through the generated web3 contract wrapper are not working with the latest web3
+    # version because chainId ends up null (specifically on linea-sepolia), so the eth_call
+    # is built manually below with an explicit chainId.
+    # TODO: find a better solution than this hand-rolled call
let membershipIndex = g.membershipIndex.get()
let index40 = stuint(membershipIndex, 40)
- let methodSig = "merkleProofElements(uint40)"
+ let methodSig = "getMerkleProof(uint40)"
let methodIdDigest = keccak.keccak256.digest(methodSig)
let methodId = methodIdDigest.data[0 .. 3]
@@ -111,6 +167,7 @@ proc fetchMerkleProofElements*(
var tx: TransactionArgs
tx.to = Opt.some(fromHex(Address, g.ethContractAddress))
tx.data = Opt.some(callData)
+ tx.chainId = Opt.some(g.chainId) # Explicitly set the chain ID
let responseBytes = await g.ethRpc.get().provider.eth_call(tx, "latest")
@@ -123,8 +180,17 @@ proc fetchMerkleRoot*(
g: OnchainGroupManager
): Future[Result[UInt256, string]] {.async.} =
try:
- let merkleRootInvocation = g.wakuRlnContract.get().root()
- let merkleRoot = await merkleRootInvocation.call()
+ let merkleRoot = (
+ await sendEthCallWithChainId(
+ ethRpc = g.ethRpc.get(),
+ functionSignature = "root()",
+ fromAddress = g.ethRpc.get().defaultAccount,
+ toAddress = fromHex(Address, g.ethContractAddress),
+ chainId = g.chainId,
+ )
+ ).valueOr:
+ error "Failed to fetch Merkle root", error = $error
+ return err("Failed to fetch merkle root: " & $error)
return ok(merkleRoot)
except CatchableError:
error "Failed to fetch Merkle root", error = getCurrentExceptionMsg()
@@ -151,6 +217,7 @@ proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} =
return false
let merkleRoot = UInt256ToField(rootRes.get())
+
if g.validRoots.len == 0:
g.validRoots.addLast(merkleRoot)
return true
@@ -183,8 +250,26 @@ proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError
error "Failed to fetch Merkle proof", error = proofResult.error
g.merkleProofCache = proofResult.get()
- # also need update registerd membership
- let memberCount = cast[int64](await wakuRlnContract.commitmentIndex().call())
+ # also need to update registered membership
+    # let memberCount = cast[int64](await wakuRlnContract.nextFreeIndex().call())
+    # The above call is not working with the latest web3 version because chainId ends up null
+    # (specifically on linea-sepolia), hence the custom sendEthCallWithChainId call below.
+    # TODO: find a better solution than this custom sendEthCallWithChainId call
+ let nextFreeIndex = await sendEthCallWithChainId(
+ ethRpc = ethRpc,
+ functionSignature = "nextFreeIndex()",
+ fromAddress = ethRpc.defaultAccount,
+ toAddress = fromHex(Address, g.ethContractAddress),
+ chainId = g.chainId,
+ )
+
+ if nextFreeIndex.isErr():
+ error "Failed to fetch next free index", error = nextFreeIndex.error
+ raise newException(
+ CatchableError, "Failed to fetch next free index: " & nextFreeIndex.error
+ )
+
+ let memberCount = cast[int64](nextFreeIndex.get())
waku_rln_number_registered_memberships.set(float64(memberCount))
await sleepAsync(rpcDelay)
@@ -219,15 +304,19 @@ method register*(
var gasPrice: int
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
+ let idCommitmentHex = identityCredential.idCommitment.inHex()
+ debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
let idCommitment = identityCredential.idCommitment.toUInt256()
-
+ let idCommitmentsToErase: seq[UInt256] = @[]
debug "registering the member",
- idCommitment = idCommitment, userMessageLimit = userMessageLimit
+ idCommitment = idCommitment,
+ userMessageLimit = userMessageLimit,
+ idCommitmentsToErase = idCommitmentsToErase
var txHash: TxHash
g.retryWrapper(txHash, "Failed to register the member"):
- await wakuRlnContract.register(idCommitment, userMessageLimit.stuint(32)).send(
- gasPrice = gasPrice
- )
+ await wakuRlnContract
+ .register(idCommitment, userMessageLimit.stuint(32), idCommitmentsToErase)
+ .send(gasPrice = gasPrice)
# wait for the transaction to be mined
var tsReceipt: ReceiptObject
@@ -240,27 +329,29 @@ method register*(
debug "ts receipt", receipt = tsReceipt[]
if tsReceipt.status.isNone():
- raise newException(ValueError, "register: transaction failed status is None")
+ raise newException(ValueError, "Transaction failed: status is None")
if tsReceipt.status.get() != 1.Quantity:
raise newException(
- ValueError, "register: transaction failed status is: " & $tsReceipt.status.get()
+ ValueError, "Transaction failed with status: " & $tsReceipt.status.get()
)
- let firstTopic = tsReceipt.logs[0].topics[0]
- # the hash of the signature of MemberRegistered(uint256,uint32) event is equal to the following hex value
- if firstTopic !=
- cast[FixedBytes[32]](keccak.keccak256.digest("MemberRegistered(uint256,uint32)").data):
+ ## Extract MembershipRegistered event from transaction logs (third event)
+ let thirdTopic = tsReceipt.logs[2].topics[0]
+ debug "third topic", thirdTopic = thirdTopic
+ if thirdTopic !=
+ cast[FixedBytes[32]](keccak.keccak256.digest(
+ "MembershipRegistered(uint256,uint256,uint32)"
+ ).data):
raise newException(ValueError, "register: unexpected event signature")
- # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field
- # data = rateCommitment encoded as 256 bits || index encoded as 32 bits
- let arguments = tsReceipt.logs[0].data
+    ## Parse MembershipRegistered event data: idCommitment(256) || membershipRateLimit(256) || index(32)
+ let arguments = tsReceipt.logs[2].data
debug "tx log data", arguments = arguments
let
- # In TX log data, uints are encoded in big endian
- membershipIndex = UInt256.fromBytesBE(arguments[32 ..^ 1])
+ ## Extract membership index from transaction log data (big endian)
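+      ## each non-indexed event field occupies one 32-byte ABI slot, so the uint32 index sits in the third slot (bytes 64..95)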
+ membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95])
- debug "parsed membershipIndex", membershipIndex
+ trace "parsed membershipIndex", membershipIndex
g.userMessageLimit = some(userMessageLimit)
g.membershipIndex = some(membershipIndex.toMembershipIndex())
g.idCredentials = some(identityCredential)
@@ -376,7 +467,7 @@ method generateProof*(
var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`)
let proofBytes: array[320, byte] = proofValue[]
- ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
+ ## Parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ]
let
proofOffset = 128
rootOffset = proofOffset + 32
@@ -418,9 +509,7 @@ method generateProof*(
return ok(output)
method verifyProof*(
- g: OnchainGroupManager, # verifier context
- input: seq[byte], # raw message data (signal)
- proof: RateLimitProof, # proof received from the peer
+ g: OnchainGroupManager, input: seq[byte], proof: RateLimitProof
): GroupManagerResult[bool] {.gcsafe, raises: [].} =
## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots --
@@ -543,11 +632,31 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
g.membershipIndex = some(keystoreCred.treeIndex)
g.userMessageLimit = some(keystoreCred.userMessageLimit)
# now we check on the contract if the commitment actually has a membership
+ let idCommitmentBytes = keystoreCred.identityCredential.idCommitment
+ let idCommitmentUInt256 = keystoreCred.identityCredential.idCommitment.toUInt256()
+ let idCommitmentHex = idCommitmentBytes.inHex()
+ debug "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes
+ debug "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256
+ debug "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex
+ let idCommitment = idCommitmentUInt256
try:
- let membershipExists = await wakuRlnContract
- .memberExists(keystoreCred.identityCredential.idCommitment.toUInt256())
- .call()
- if membershipExists == 0:
+ let commitmentBytes = keystoreCred.identityCredential.idCommitment
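+      # the ABI expects the uint256 idCommitment as 32 big-endian bytes, hence the reversal below
+      # (the credential bytes appear to be serialized little-endian)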
+ let params = commitmentBytes.reversed()
+ let resultBytes = await sendEthCallWithParams(
+ ethRpc = g.ethRpc.get(),
+ functionSignature = "isInMembershipSet(uint256)",
+ params = params,
+ fromAddress = ethRpc.defaultAccount,
+ toAddress = contractAddress,
+ chainId = g.chainId,
+ )
+ if resultBytes.isErr():
+ return err("Failed to check membership: " & resultBytes.error)
+ let responseBytes = resultBytes.get()
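+      # an ABI-encoded bool is returned as a single 32-byte word whose last byte is 0 or 1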
+ let membershipExists = responseBytes.len == 32 and responseBytes[^1] == 1'u8
+
+ debug "membershipExists", membershipExists = membershipExists
+      if not membershipExists:
return err("the commitment does not have a membership")
except CatchableError:
return err("failed to check if the commitment has a membership")
@@ -564,8 +673,18 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.}
if metadata.contractAddress != g.ethContractAddress.toLower():
return err("persisted data: contract address mismatch")
- g.rlnRelayMaxMessageLimit =
- cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call())
+ let maxMembershipRateLimit = (
+ await sendEthCallWithChainId(
+ ethRpc = ethRpc,
+ functionSignature = "maxMembershipRateLimit()",
+ fromAddress = ethRpc.defaultAccount,
+ toAddress = contractAddress,
+ chainId = g.chainId,
+ )
+ ).valueOr:
+ return err("Failed to fetch max membership rate limit: " & $error)
+
+ g.rlnRelayMaxMessageLimit = cast[uint64](maxMembershipRateLimit)
proc onDisconnect() {.async.} =
error "Ethereum client disconnected"
From 5f5e0893e040e8c93fd9a4fdc3d9c58145a548a5 Mon Sep 17 00:00:00 2001
From: AYAHASSAN287 <49167455+AYAHASSAN287@users.noreply.github.com>
Date: Tue, 24 Jun 2025 15:54:38 +0300
Subject: [PATCH 30/47] fix: failed sync test (#3464)
* Increase time window to avoid messages overlapping in the failed test
---
tests/waku_store_sync/test_protocol.nim | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim
index d0f6b81ee..c606934cf 100644
--- a/tests/waku_store_sync/test_protocol.nim
+++ b/tests/waku_store_sync/test_protocol.nim
@@ -410,7 +410,7 @@ suite "Waku Sync: reconciliation":
diffInWin = 20
diffOutWin = 20
stepOutNs = 100_000_000'u64
- outOffsetNs = 2_000_000_000'u64 # for 20 mesg they sent 2 seconds earlier
+      outOffsetNs = 2_300_000_000'u64 # the 20 out-of-window msgs are sent 2.3 seconds earlier
randomize()
From d7a3a85db9dbea35f865c623b6d22f1e7af78076 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Tue, 24 Jun 2025 23:20:08 +0200
Subject: [PATCH 31/47] chore: Libwaku watchdog that can potentially raise a
WakuNotResponding event if Waku is blocked (#3466)
* refactor add waku not responding event to libwaku
Co-authored-by: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
---
.../events/json_waku_not_responding_event.nim | 9 +
library/libwaku.h | 2 +
library/libwaku.nim | 43 +---
.../requests/debug_node_request.nim | 3 +
library/waku_thread/waku_thread.nim | 227 ++++++++++++------
5 files changed, 175 insertions(+), 109 deletions(-)
create mode 100644 library/events/json_waku_not_responding_event.nim
diff --git a/library/events/json_waku_not_responding_event.nim b/library/events/json_waku_not_responding_event.nim
new file mode 100644
index 000000000..1e1d5fcc5
--- /dev/null
+++ b/library/events/json_waku_not_responding_event.nim
@@ -0,0 +1,9 @@
+import system, std/json, ./json_base_event
+
+type JsonWakuNotRespondingEvent* = ref object of JsonEvent
+
+proc new*(T: type JsonWakuNotRespondingEvent): T =
+ return JsonWakuNotRespondingEvent(eventType: "waku_not_responding")
+
+method `$`*(event: JsonWakuNotRespondingEvent): string =
+ $(%*event)
diff --git a/library/libwaku.h b/library/libwaku.h
index 525fec69a..b5d6c9bab 100644
--- a/library/libwaku.h
+++ b/library/libwaku.h
@@ -45,6 +45,8 @@ int waku_version(void* ctx,
WakuCallBack callback,
void* userData);
+// Sets a callback that will be invoked whenever an event occurs.
+// It is crucial that the passed callback is fast, non-blocking and thread-safe,
+// since it may be invoked from a libwaku-owned thread rather than the caller's.
void waku_set_event_callback(void* ctx,
WakuCallBack callback,
void* userData);
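Note: the sketch below is illustrative only and not part of this patch. Assuming RET_OK == 0 and using the WakuCallBack signature from ffi_types, it shows how a consumer-side event callback registered through waku_set_event_callback might react to the new waku_not_responding event (the handler name is hypothetical):

    import std/json

    proc onWakuEvent(callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe.} =
      # runs on a libwaku-owned thread: keep it fast and non-blocking
      if callerRet != 0 or msg.isNil or len == 0: # assuming RET_OK == 0
        return
      var payload = newString(int(len))
      copyMem(addr payload[0], msg, int(len))
      var event: JsonNode
      try:
        event = parseJson(payload)
      except CatchableError:
        return
      if event{"eventType"}.getStr() == "waku_not_responding":
        echo "Waku thread appears blocked; consider restarting the node"

    # registration from the consumer side would look like:
    #   waku_set_event_callback(ctx, onWakuEvent, nil)

Keeping the handler this small matters because it is executed outside the consumer's own event loop; any heavy reaction (e.g. restarting the node) should be deferred to the consumer's main thread.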
diff --git a/library/libwaku.nim b/library/libwaku.nim
index 3e4431411..bc1614af8 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -15,8 +15,7 @@ import
waku/waku_core/topics/pubsub_topic,
waku/waku_core/subscription/push_handler,
waku/waku_relay,
- ./events/
- [json_message_event, json_topic_health_change_event, json_connection_change_event],
+ ./events/json_message_event,
./waku_thread/waku_thread,
./waku_thread/inter_thread_communication/requests/node_lifecycle_request,
./waku_thread/inter_thread_communication/requests/peer_manager_request,
@@ -48,25 +47,6 @@ template checkLibwakuParams*(
if isNil(callback):
return RET_MISSING_CALLBACK
-template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
- if isNil(ctx[].eventCallback):
- error eventName & " - eventCallback is nil"
- return
-
- foreignThreadGc:
- try:
- let event = body
- cast[WakuCallBack](ctx[].eventCallback)(
- RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
- )
- except Exception, CatchableError:
- let msg =
- "Exception " & eventName & " when calling 'eventCallBack': " &
- getCurrentExceptionMsg()
- cast[WakuCallBack](ctx[].eventCallback)(
- RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
- )
-
proc handleRequest(
ctx: ptr WakuContext,
requestType: RequestType,
@@ -81,21 +61,6 @@ proc handleRequest(
return RET_OK
-proc onConnectionChange(ctx: ptr WakuContext): ConnectionChangeHandler =
- return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
- callEventCallback(ctx, "onConnectionChange"):
- $JsonConnectionChangeEvent.new($peerId, peerEvent)
-
-proc onReceivedMessage(ctx: ptr WakuContext): WakuRelayHandler =
- return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
- callEventCallback(ctx, "onReceivedMessage"):
- $JsonMessageEvent.new(pubsubTopic, msg)
-
-proc onTopicHealthChange(ctx: ptr WakuContext): TopicHealthChangeHandler =
- return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
- callEventCallback(ctx, "onTopicHealthChange"):
- $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
-
### End of not-exported components
################################################################################
@@ -146,8 +111,8 @@ proc waku_new(
return nil
## Create the Waku thread that will keep waiting for req from the main thread.
- var ctx = waku_thread.createWakuThread().valueOr:
- let msg = "Error in createWakuThread: " & $error
+ var ctx = waku_thread.createWakuContext().valueOr:
+ let msg = "Error in createWakuContext: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
@@ -180,7 +145,7 @@ proc waku_destroy(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- waku_thread.destroyWakuThread(ctx).isOkOr:
+ waku_thread.destroyWakuContext(ctx).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
diff --git a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
index 4ab8914ee..0bd9235b6 100644
--- a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
+++ b/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
@@ -18,6 +18,7 @@ type DebugNodeMsgType* = enum
RETRIEVE_MY_PEER_ID
RETRIEVE_METRICS
RETRIEVE_ONLINE_STATE
+ CHECK_WAKU_NOT_BLOCKED
type DebugNodeRequest* = object
operation: DebugNodeMsgType
@@ -55,6 +56,8 @@ proc process*(
return ok(getMetrics())
of RETRIEVE_ONLINE_STATE:
return ok($waku.healthMonitor.onlineMonitor.amIOnline())
+ of CHECK_WAKU_NOT_BLOCKED:
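+    # merely answering proves the Waku thread's event loop is processing requests, i.e. it is not blocked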
+ return ok("waku thread is not blocked")
error "unsupported operation in DebugNodeRequest"
return err("unsupported operation in DebugNodeRequest")
diff --git a/library/waku_thread/waku_thread.nim b/library/waku_thread/waku_thread.nim
index 640389e32..37c37e6df 100644
--- a/library/waku_thread/waku_thread.nim
+++ b/library/waku_thread/waku_thread.nim
@@ -4,10 +4,22 @@
import std/[options, atomics, os, net, locks]
import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results
-import waku/factory/waku, ./inter_thread_communication/waku_thread_request, ../ffi_types
+import
+ waku/factory/waku,
+ waku/node/peer_manager,
+ waku/waku_relay/[protocol, topic_health],
+ waku/waku_core/[topics/pubsub_topic, message],
+ ./inter_thread_communication/[waku_thread_request, requests/debug_node_request],
+ ../ffi_types,
+ ../events/[
+ json_message_event, json_topic_health_change_event, json_connection_change_event,
+ json_waku_not_responding_event,
+ ]
type WakuContext* = object
- thread: Thread[(ptr WakuContext)]
+ wakuThread: Thread[(ptr WakuContext)]
+ watchdogThread: Thread[(ptr WakuContext)]
+ # monitors the Waku thread and notifies the Waku SDK consumer if it hangs
lock: Lock
reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest]
reqSignal: ThreadSignalPtr
@@ -17,78 +29,48 @@ type WakuContext* = object
userData*: pointer
eventCallback*: pointer
eventUserdata*: pointer
- running: Atomic[bool] # To control when the thread is running
+ running: Atomic[bool] # To control when the threads are running
const git_version* {.strdefine.} = "n/a"
const versionString = "version / git commit hash: " & waku.git_version
-proc runWaku(ctx: ptr WakuContext) {.async.} =
- ## This is the worker body. This runs the Waku node
- ## and attends library user requests (stop, connect_to, etc.)
+template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
+ if isNil(ctx[].eventCallback):
+ error eventName & " - eventCallback is nil"
+ return
- var waku: Waku
+ foreignThreadGc:
+ try:
+ let event = body
+ cast[WakuCallBack](ctx[].eventCallback)(
+ RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
+ )
+ except Exception, CatchableError:
+ let msg =
+ "Exception " & eventName & " when calling 'eventCallBack': " &
+ getCurrentExceptionMsg()
+ cast[WakuCallBack](ctx[].eventCallback)(
+ RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
+ )
- while true:
- await ctx.reqSignal.wait()
+proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler =
+ return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
+ callEventCallback(ctx, "onConnectionChange"):
+ $JsonConnectionChangeEvent.new($peerId, peerEvent)
- if ctx.running.load == false:
- break
+proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler =
+ return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
+ callEventCallback(ctx, "onReceivedMessage"):
+ $JsonMessageEvent.new(pubsubTopic, msg)
- ## Trying to get a request from the libwaku requestor thread
- var request: ptr WakuThreadRequest
- let recvOk = ctx.reqChannel.tryRecv(request)
- if not recvOk:
- error "waku thread could not receive a request"
- continue
+proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler =
+ return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
+ callEventCallback(ctx, "onTopicHealthChange"):
+ $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
- let fireRes = ctx.reqReceivedSignal.fireSync()
- if fireRes.isErr():
- error "could not fireSync back to requester thread", error = fireRes.error
-
- ## Handle the request
- asyncSpawn WakuThreadRequest.process(request, addr waku)
-
-proc run(ctx: ptr WakuContext) {.thread.} =
- ## Launch waku worker
- waitFor runWaku(ctx)
-
-proc createWakuThread*(): Result[ptr WakuContext, string] =
- ## This proc is called from the main thread and it creates
- ## the Waku working thread.
- var ctx = createShared(WakuContext, 1)
- ctx.reqSignal = ThreadSignalPtr.new().valueOr:
- return err("couldn't create reqSignal ThreadSignalPtr")
- ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
- return err("couldn't create reqReceivedSignal ThreadSignalPtr")
- ctx.lock.initLock()
-
- ctx.running.store(true)
-
- try:
- createThread(ctx.thread, run, ctx)
- except ValueError, ResourceExhaustedError:
- # and freeShared for typed allocations!
- freeShared(ctx)
-
- return err("failed to create the Waku thread: " & getCurrentExceptionMsg())
-
- return ok(ctx)
-
-proc destroyWakuThread*(ctx: ptr WakuContext): Result[void, string] =
- ctx.running.store(false)
-
- let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
- return err("error in destroyWakuThread: " & $error)
- if not signaledOnTime:
- return err("failed to signal reqSignal on time in destroyWakuThread")
-
- joinThread(ctx.thread)
- ctx.lock.deinitLock()
- ?ctx.reqSignal.close()
- ?ctx.reqReceivedSignal.close()
- freeShared(ctx)
-
- return ok()
+proc onWakuNotResponding*(ctx: ptr WakuContext) =
+  callEventCallback(ctx, "onWakuNotResponding"):
+ $JsonWakuNotRespondingEvent.new()
proc sendRequestToWakuThread*(
ctx: ptr WakuContext,
@@ -96,16 +78,17 @@ proc sendRequestToWakuThread*(
reqContent: pointer,
callback: WakuCallBack,
userData: pointer,
+ timeout = InfiniteDuration,
): Result[void, string] =
- let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
-
+ ctx.lock.acquire()
# This lock is only necessary while we use a SP Channel and while the signalling
# between threads assumes that there aren't concurrent requests.
# Rearchitecting the signaling + migrating to a MP Channel will allow us to receive
# requests concurrently and spare us the need of locks
- ctx.lock.acquire()
defer:
ctx.lock.release()
+
+ let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
## Sending the request
let sentOk = ctx.reqChannel.trySend(req)
if not sentOk:
@@ -122,11 +105,115 @@ proc sendRequestToWakuThread*(
return err("Couldn't fireSync in time")
## wait until the Waku Thread properly received the request
- let res = ctx.reqReceivedSignal.waitSync()
+ let res = ctx.reqReceivedSignal.waitSync(timeout)
if res.isErr():
deallocShared(req)
return err("Couldn't receive reqReceivedSignal signal")
## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the
- ## process proc.
+ ## process proc. See the 'waku_thread_request.nim' module for more details.
ok()
+
+proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
+ ## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs.
+
+ let watchdogRun = proc(ctx: ptr WakuContext) {.async.} =
+ const WatchdogTimeinterval = 1.seconds
+ const WakuNotRespondingTimeout = 3.seconds
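+    # a request that is not acknowledged within WakuNotRespondingTimeout is treated as a
+    # blocked Waku thread and surfaced to the consumer via onWakuNotResponding below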
+ while true:
+ await sleepAsync(WatchdogTimeinterval)
+
+ if ctx.running.load == false:
+ debug "Watchdog thread exiting because WakuContext is not running"
+ break
+
+ let wakuCallback = proc(
+ callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
+ ) {.cdecl, gcsafe, raises: [].} =
+ discard ## Don't do anything. Just respecting the callback signature.
+ const nilUserData = nil
+
+ trace "Sending watchdog request to Waku thread"
+
+ sendRequestToWakuThread(
+ ctx,
+ RequestType.DEBUG,
+ DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED),
+ wakuCallback,
+ nilUserData,
+ WakuNotRespondingTimeout,
+ ).isOkOr:
+ error "Failed to send watchdog request to Waku thread", error = $error
+ onWakuNotResponding(ctx)
+
+ waitFor watchdogRun(ctx)
+
+proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
+  ## Waku thread that handles library user requests (stop, connect_to, etc.)
+
+ let wakuRun = proc(ctx: ptr WakuContext) {.async.} =
+ var waku: Waku
+ while true:
+ await ctx.reqSignal.wait()
+
+ if ctx.running.load == false:
+ break
+
+ ## Trying to get a request from the libwaku requestor thread
+ var request: ptr WakuThreadRequest
+ let recvOk = ctx.reqChannel.tryRecv(request)
+ if not recvOk:
+ error "waku thread could not receive a request"
+ continue
+
+ let fireRes = ctx.reqReceivedSignal.fireSync()
+ if fireRes.isErr():
+ error "could not fireSync back to requester thread", error = fireRes.error
+
+ ## Handle the request
+ asyncSpawn WakuThreadRequest.process(request, addr waku)
+
+ waitFor wakuRun(ctx)
+
+proc createWakuContext*(): Result[ptr WakuContext, string] =
+ ## This proc is called from the main thread and it creates
+ ## the Waku working thread.
+ var ctx = createShared(WakuContext, 1)
+ ctx.reqSignal = ThreadSignalPtr.new().valueOr:
+ return err("couldn't create reqSignal ThreadSignalPtr")
+ ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
+ return err("couldn't create reqReceivedSignal ThreadSignalPtr")
+ ctx.lock.initLock()
+
+ ctx.running.store(true)
+
+ try:
+ createThread(ctx.wakuThread, wakuThreadBody, ctx)
+ except ValueError, ResourceExhaustedError:
+ freeShared(ctx)
+ return err("failed to create the Waku thread: " & getCurrentExceptionMsg())
+
+ try:
+ createThread(ctx.watchdogThread, watchdogThreadBody, ctx)
+ except ValueError, ResourceExhaustedError:
+ freeShared(ctx)
+ return err("failed to create the watchdog thread: " & getCurrentExceptionMsg())
+
+ return ok(ctx)
+
+proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] =
+ ctx.running.store(false)
+
+ let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
+ return err("error in destroyWakuContext: " & $error)
+ if not signaledOnTime:
+ return err("failed to signal reqSignal on time in destroyWakuContext")
+
+ joinThread(ctx.wakuThread)
+ joinThread(ctx.watchdogThread)
+ ctx.lock.deinitLock()
+ ?ctx.reqSignal.close()
+ ?ctx.reqReceivedSignal.close()
+ freeShared(ctx)
+
+ return ok()
From 15025fe6cc57af15ea296be322f1231d31fcbf93 Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Wed, 25 Jun 2025 13:58:49 +1000
Subject: [PATCH 32/47] test: include all factory tests (#3467)
* test: include all factory tests
* test: don't expect to override a preset
---
tests/all_tests_waku.nim | 2 +-
tests/factory/test_waku_conf.nim | 19 ++++++++++---------
.../conf_builder/discv5_conf_builder.nim | 3 +++
3 files changed, 14 insertions(+), 10 deletions(-)
diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim
index 07e0cd895..2723fac8f 100644
--- a/tests/all_tests_waku.nim
+++ b/tests/all_tests_waku.nim
@@ -108,4 +108,4 @@ import
import ./waku_rln_relay/test_all
# Node Factory
-import ./factory/test_external_config
+import ./factory/[test_external_config, test_node_factory, test_waku_conf]
diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim
index 6b7040dd5..c18a2c73c 100644
--- a/tests/factory/test_waku_conf.nim
+++ b/tests/factory/test_waku_conf.nim
@@ -9,7 +9,7 @@ import
testutils/unittests
import
waku/factory/waku_conf,
- waku/factory/waku_conf_builder,
+ waku/factory/conf_builder/conf_builder,
waku/factory/networks_config,
waku/common/utils/parse_size_units
@@ -24,7 +24,7 @@ suite "Waku Conf - build with cluster conf":
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
@@ -65,7 +65,7 @@ suite "Waku Conf - build with cluster conf":
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.withRelay(false)
@@ -95,7 +95,7 @@ suite "Waku Conf - build with cluster conf":
expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.rlnRelayConf.withEnabled(false)
@@ -122,7 +122,7 @@ suite "Waku Conf - build with cluster conf":
let shards = @[2.uint16, 3.uint16]
## Given
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.withShards(shards)
@@ -148,7 +148,7 @@ suite "Waku Conf - build with cluster conf":
let shards = @[2.uint16, 10.uint16]
## Given
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.withShards(shards)
@@ -158,11 +158,11 @@ suite "Waku Conf - build with cluster conf":
## Then
assert resConf.isErr(), "Invalid shard was accepted"
- test "Cluster Conf is passed and RLN contract is overridden":
+ test "Cluster Conf is passed and RLN contract is **not** overridden":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
- builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
+ builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
@@ -194,7 +194,8 @@ suite "Waku Conf - build with cluster conf":
assert conf.rlnRelayConf.isSome
let rlnRelayConf = conf.rlnRelayConf.get()
- check rlnRelayConf.ethContractAddress.string == contractAddress
+ check rlnRelayConf.ethContractAddress.string ==
+ clusterConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
diff --git a/waku/factory/conf_builder/discv5_conf_builder.nim b/waku/factory/conf_builder/discv5_conf_builder.nim
index 30755669b..e2729021e 100644
--- a/waku/factory/conf_builder/discv5_conf_builder.nim
+++ b/waku/factory/conf_builder/discv5_conf_builder.nim
@@ -38,6 +38,9 @@ proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) =
proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) =
b.udpPort = some(udpPort)
+proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: uint) =
+ b.udpPort = some(Port(udpPort.uint16))
+
proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) =
# TODO: validate ENRs?
b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes)
From 5c38a53f7c39ee75a7f93b82246d539de72d260c Mon Sep 17 00:00:00 2001
From: Darshan K <35736874+darshankabariya@users.noreply.github.com>
Date: Thu, 26 Jun 2025 01:03:40 +0530
Subject: [PATCH 33/47] feat: libwaku dll for status go (#3460)
---
.github/workflows/windows-build.yml | 12 ++++-
Makefile | 12 ++---
README.md | 2 +-
...d_wakunode_windows.sh => build_windows.sh} | 11 +++--
waku.nimble | 44 ++++++++-----------
5 files changed, 45 insertions(+), 36 deletions(-)
rename scripts/{build_wakunode_windows.sh => build_windows.sh} (88%)
diff --git a/.github/workflows/windows-build.yml b/.github/workflows/windows-build.yml
index 0582d5fd1..52cd7f91a 100644
--- a/.github/workflows/windows-build.yml
+++ b/.github/workflows/windows-build.yml
@@ -81,9 +81,13 @@ jobs:
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
cd ../../../../
- - name: Building wakunode2
+ - name: Building wakunode2.exe
run: |
make wakunode2 LOG_LEVEL=DEBUG V=3 -j8
+
+ - name: Building libwaku.dll
+ run: |
+ make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j
- name: Check Executable
run: |
@@ -93,3 +97,9 @@ jobs:
echo "Build failed: wakunode2.exe not found"
exit 1
fi
+ if [ -f "./build/libwaku.dll" ]; then
+ echo "libwaku.dll build successful"
+ else
+ echo "Build failed: libwaku.dll not found"
+ exit 1
+ fi
diff --git a/Makefile b/Makefile
index d11f50bec..555a20472 100644
--- a/Makefile
+++ b/Makefile
@@ -398,14 +398,16 @@ docker-liteprotocoltester-push:
STATIC ?= 0
+
libwaku: | build deps librln
- rm -f build/libwaku*
+ rm -f build/libwaku*
+
ifeq ($(STATIC), 1)
- echo -e $(BUILD_MSG) "build/$@.a" && \
- $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
+ echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
+else ifeq ($(detected_OS),Windows)
+ echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
else
- echo -e $(BUILD_MSG) "build/$@.so" && \
- $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
+ echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
endif
#####################
diff --git a/README.md b/README.md
index 057d0b622..119c00052 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ pacman -S --noconfirm --needed mingw-w64-x86_64-python
#### 3. Build Wakunode
- Open Git Bash as administrator
- clone nwaku and cd nwaku
-- Execute: `./scripts/build_wakunode_windows.sh`
+- Execute: `./scripts/build_windows.sh`
#### 4. Troubleshooting
If `wakunode2.exe` isn't generated:
diff --git a/scripts/build_wakunode_windows.sh b/scripts/build_windows.sh
similarity index 88%
rename from scripts/build_wakunode_windows.sh
rename to scripts/build_windows.sh
index ef0881836..e56fb8871 100755
--- a/scripts/build_wakunode_windows.sh
+++ b/scripts/build_windows.sh
@@ -36,25 +36,28 @@ cd ../../../..
echo "6. -.-.-.- Building libunwind -.-.-.-"
cd vendor/nim-libbacktrace
-execute_command "make all V=1"
-execute_command "make install/usr/lib/libunwind.a V=1"
+execute_command "make all V=1 -j8"
+execute_command "make install/usr/lib/libunwind.a V=1 -j8"
cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib
cd ../../
echo "7. -.-.-.- Building miniupnpc -.-.-.- "
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
execute_command "git checkout little_chore_windows_support"
-execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1"
+execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 -j8"
cd ../../../../..
echo "8. -.-.-.- Building libnatpmp -.-.-.- "
cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream
-make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
+make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 -j8
cd ../../../../
echo "9. -.-.-.- Building wakunode2 -.-.-.- "
execute_command "make wakunode2 LOG_LEVEL=DEBUG V=1 -j8"
+echo "10. -.-.-.- Building libwaku -.-.-.- "
+execute_command "make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j8"
+
echo "Windows setup completed successfully!"
echo "✓ Successful commands: $success_count"
echo "✗ Failed commands: $failure_count"
diff --git a/waku.nimble b/waku.nimble
index 5be212264..2f9a73595 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -1,3 +1,4 @@
+import os
mode = ScriptMode.Verbose
### Package
@@ -69,9 +70,15 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
- exec "nim c" & " --out:build/" & name &
- ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
- extra_params & " " & srcDir & name & ".nim"
+    var lib_name = when defined(windows): toDll(name) else: name & ".so"
+ when defined(windows):
+ exec "nim c" & " --out:build/" & lib_name &
+ " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
+ extra_params & " " & srcDir & name & ".nim"
+ else:
+ exec "nim c" & " --out:build/" & lib_name &
+ " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
+ extra_params & " " & srcDir & name & ".nim"
proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU")
@@ -160,33 +167,20 @@ task testone, "Test custom target":
exec "build/" & filepath & ".bin"
### C Bindings
+let chroniclesParams =
+ "-d:chronicles_line_numbers " & "-d:chronicles_runtime_filtering=on " &
+ """-d:chronicles_sinks="textlines,json" """ &
+ "-d:chronicles_default_output_device=Dynamic " &
+ """-d:chronicles_disabled_topics="eth,dnsdisc.client" """ & "--warning:Deprecated:off " &
+ "--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE"
+
task libwakuStatic, "Build the cbindings waku node library":
let name = "libwaku"
- buildLibrary name,
- "library/",
- """-d:chronicles_line_numbers \
- -d:chronicles_runtime_filtering=on \
- -d:chronicles_sinks="textlines,json" \
- -d:chronicles_default_output_device=Dynamic \
- -d:chronicles_disabled_topics="eth,dnsdisc.client" \
- --warning:Deprecated:off \
- --warning:UnusedImport:on \
- -d:chronicles_log_level=TRACE """,
- "static"
+ buildLibrary name, "library/", chroniclesParams, "static"
task libwakuDynamic, "Build the cbindings waku node library":
let name = "libwaku"
- buildLibrary name,
- "library/",
- """-d:chronicles_line_numbers \
- -d:chronicles_runtime_filtering=on \
- -d:chronicles_sinks="textlines,json" \
- -d:chronicles_default_output_device=Dynamic \
- -d:chronicles_disabled_topics="eth,dnsdisc.client" \
- --warning:Deprecated:off \
- --warning:UnusedImport:on \
- -d:chronicles_log_level=TRACE """,
- "dynamic"
+ buildLibrary name, "library/", chroniclesParams, "dynamic"
### Mobile Android
task libWakuAndroid, "Build the mobile bindings for Android":
From 26c2b96cfe8f035c06c51df734deedc8b041dad7 Mon Sep 17 00:00:00 2001
From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com>
Date: Thu, 26 Jun 2025 11:27:39 +0200
Subject: [PATCH 34/47] chore: rename modules (#3469)
---
library/libwaku.nim | 28 +++++++++----------
.../waku_thread.nim => waku_context.nim} | 6 ++--
.../requests/debug_node_request.nim | 6 ++--
.../requests/discovery_request.nim | 12 ++++----
.../requests/node_lifecycle_request.nim | 16 +++++------
.../requests/peer_manager_request.nim | 8 +++---
.../requests/ping_request.nim | 2 +-
.../requests/protocols/filter_request.nim | 20 ++++++-------
.../requests/protocols/lightpush_request.nim | 20 ++++++-------
.../requests/protocols/relay_request.nim | 20 ++++++-------
.../requests/protocols/store_request.nim | 18 ++++++------
.../waku_thread_request.nim | 4 +--
12 files changed, 80 insertions(+), 80 deletions(-)
rename library/{waku_thread/waku_thread.nim => waku_context.nim} (98%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/debug_node_request.nim (93%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/discovery_request.nim (95%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/node_lifecycle_request.nim (91%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/peer_manager_request.nim (97%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/ping_request.nim (94%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/protocols/filter_request.nim (88%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/protocols/lightpush_request.nim (85%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/protocols/relay_request.nim (91%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/requests/protocols/store_request.nim (92%)
rename library/{waku_thread/inter_thread_communication => waku_thread_requests}/waku_thread_request.nim (98%)
diff --git a/library/libwaku.nim b/library/libwaku.nim
index bc1614af8..ad3afa134 100644
--- a/library/libwaku.nim
+++ b/library/libwaku.nim
@@ -16,17 +16,17 @@ import
waku/waku_core/subscription/push_handler,
waku/waku_relay,
./events/json_message_event,
- ./waku_thread/waku_thread,
- ./waku_thread/inter_thread_communication/requests/node_lifecycle_request,
- ./waku_thread/inter_thread_communication/requests/peer_manager_request,
- ./waku_thread/inter_thread_communication/requests/protocols/relay_request,
- ./waku_thread/inter_thread_communication/requests/protocols/store_request,
- ./waku_thread/inter_thread_communication/requests/protocols/lightpush_request,
- ./waku_thread/inter_thread_communication/requests/protocols/filter_request,
- ./waku_thread/inter_thread_communication/requests/debug_node_request,
- ./waku_thread/inter_thread_communication/requests/discovery_request,
- ./waku_thread/inter_thread_communication/requests/ping_request,
- ./waku_thread/inter_thread_communication/waku_thread_request,
+ ./waku_context,
+ ./waku_thread_requests/requests/node_lifecycle_request,
+ ./waku_thread_requests/requests/peer_manager_request,
+ ./waku_thread_requests/requests/protocols/relay_request,
+ ./waku_thread_requests/requests/protocols/store_request,
+ ./waku_thread_requests/requests/protocols/lightpush_request,
+ ./waku_thread_requests/requests/protocols/filter_request,
+ ./waku_thread_requests/requests/debug_node_request,
+ ./waku_thread_requests/requests/discovery_request,
+ ./waku_thread_requests/requests/ping_request,
+ ./waku_thread_requests/waku_thread_request,
./alloc,
./ffi_types,
../waku/factory/app_callbacks
@@ -54,7 +54,7 @@ proc handleRequest(
callback: WakuCallBack,
userData: pointer,
): cint =
- waku_thread.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
+ waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
@@ -111,7 +111,7 @@ proc waku_new(
return nil
## Create the Waku thread that will keep waiting for req from the main thread.
- var ctx = waku_thread.createWakuContext().valueOr:
+ var ctx = waku_context.createWakuContext().valueOr:
let msg = "Error in createWakuContext: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
@@ -145,7 +145,7 @@ proc waku_destroy(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
- waku_thread.destroyWakuContext(ctx).isOkOr:
+ waku_context.destroyWakuContext(ctx).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
diff --git a/library/waku_thread/waku_thread.nim b/library/waku_context.nim
similarity index 98%
rename from library/waku_thread/waku_thread.nim
rename to library/waku_context.nim
index 37c37e6df..2dd9e9c95 100644
--- a/library/waku_thread/waku_thread.nim
+++ b/library/waku_context.nim
@@ -9,9 +9,9 @@ import
waku/node/peer_manager,
waku/waku_relay/[protocol, topic_health],
waku/waku_core/[topics/pubsub_topic, message],
- ./inter_thread_communication/[waku_thread_request, requests/debug_node_request],
- ../ffi_types,
- ../events/[
+ ./waku_thread_requests/[waku_thread_request, requests/debug_node_request],
+ ./ffi_types,
+ ./events/[
json_message_event, json_topic_health_change_event, json_connection_change_event,
json_waku_not_responding_event,
]
diff --git a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim b/library/waku_thread_requests/requests/debug_node_request.nim
similarity index 93%
rename from library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
rename to library/waku_thread_requests/requests/debug_node_request.nim
index 0bd9235b6..c9aa5a743 100644
--- a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim
+++ b/library/waku_thread_requests/requests/debug_node_request.nim
@@ -8,9 +8,9 @@ import
libp2p/peerid,
metrics
import
- ../../../../waku/factory/waku,
- ../../../../waku/node/waku_node,
- ../../../../waku/node/health_monitor
+ ../../../waku/factory/waku,
+ ../../../waku/node/waku_node,
+ ../../../waku/node/health_monitor
type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
diff --git a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim b/library/waku_thread_requests/requests/discovery_request.nim
similarity index 95%
rename from library/waku_thread/inter_thread_communication/requests/discovery_request.nim
rename to library/waku_thread_requests/requests/discovery_request.nim
index 4eb193728..8fec0dd9f 100644
--- a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim
+++ b/library/waku_thread_requests/requests/discovery_request.nim
@@ -1,12 +1,12 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress
import
- ../../../../waku/factory/waku,
- ../../../../waku/discovery/waku_dnsdisc,
- ../../../../waku/discovery/waku_discv5,
- ../../../../waku/waku_core/peers,
- ../../../../waku/node/waku_node,
- ../../../alloc
+ ../../../waku/factory/waku,
+ ../../../waku/discovery/waku_dnsdisc,
+ ../../../waku/discovery/waku_discv5,
+ ../../../waku/waku_core/peers,
+ ../../../waku/node/waku_node,
+ ../../alloc
type DiscoveryMsgType* = enum
GET_BOOTSTRAP_NODES
diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread_requests/requests/node_lifecycle_request.nim
similarity index 91%
rename from library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
rename to library/waku_thread_requests/requests/node_lifecycle_request.nim
index 0f912aaa3..21765838e 100644
--- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim
+++ b/library/waku_thread_requests/requests/node_lifecycle_request.nim
@@ -2,14 +2,14 @@ import std/[options, json, strutils, net]
import chronos, chronicles, results, confutils, confutils/std/net
import
- ../../../../waku/node/peer_manager/peer_manager,
- ../../../../waku/factory/external_config,
- ../../../../waku/factory/waku,
- ../../../../waku/factory/node_factory,
- ../../../../waku/factory/networks_config,
- ../../../../waku/factory/app_callbacks,
- ../../../../waku/waku_api/rest/builder,
- ../../../alloc
+ ../../../waku/node/peer_manager/peer_manager,
+ ../../../waku/factory/external_config,
+ ../../../waku/factory/waku,
+ ../../../waku/factory/node_factory,
+ ../../../waku/factory/networks_config,
+ ../../../waku/factory/app_callbacks,
+ ../../../waku/waku_api/rest/builder,
+ ../../alloc
type NodeLifecycleMsgType* = enum
CREATE_NODE
diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread_requests/requests/peer_manager_request.nim
similarity index 97%
rename from library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
rename to library/waku_thread_requests/requests/peer_manager_request.nim
index deb520366..1acc78595 100644
--- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim
+++ b/library/waku_thread_requests/requests/peer_manager_request.nim
@@ -1,10 +1,10 @@
import std/[sequtils, strutils]
import chronicles, chronos, results, options, json
import
- ../../../../waku/factory/waku,
- ../../../../waku/node/waku_node,
- ../../../alloc,
- ../../../../waku/node/peer_manager
+ ../../../waku/factory/waku,
+ ../../../waku/node/waku_node,
+ ../../alloc,
+ ../../../waku/node/peer_manager
type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
diff --git a/library/waku_thread/inter_thread_communication/requests/ping_request.nim b/library/waku_thread_requests/requests/ping_request.nim
similarity index 94%
rename from library/waku_thread/inter_thread_communication/requests/ping_request.nim
rename to library/waku_thread_requests/requests/ping_request.nim
index 4467f9659..53d33968e 100644
--- a/library/waku_thread/inter_thread_communication/requests/ping_request.nim
+++ b/library/waku_thread_requests/requests/ping_request.nim
@@ -1,7 +1,7 @@
import std/[json, strutils]
import chronos, results
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
-import ../../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../../alloc
+import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc
type PingRequest* = object
peerAddr: cstring
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim b/library/waku_thread_requests/requests/protocols/filter_request.nim
similarity index 88%
rename from library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim
rename to library/waku_thread_requests/requests/protocols/filter_request.nim
index 452a0c7c3..274ec32ea 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim
+++ b/library/waku_thread_requests/requests/protocols/filter_request.nim
@@ -1,16 +1,16 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results
import
- ../../../../../waku/waku_filter_v2/client,
- ../../../../../waku/waku_core/message/message,
- ../../../../../waku/factory/waku,
- ../../../../../waku/waku_filter_v2/common,
- ../../../../../waku/waku_core/subscription/push_handler,
- ../../../../../waku/node/peer_manager/peer_manager,
- ../../../../../waku/node/waku_node,
- ../../../../../waku/waku_core/topics/pubsub_topic,
- ../../../../../waku/waku_core/topics/content_topic,
- ../../../../alloc
+ ../../../../waku/waku_filter_v2/client,
+ ../../../../waku/waku_core/message/message,
+ ../../../../waku/factory/waku,
+ ../../../../waku/waku_filter_v2/common,
+ ../../../../waku/waku_core/subscription/push_handler,
+ ../../../../waku/node/peer_manager/peer_manager,
+ ../../../../waku/node/waku_node,
+ ../../../../waku/waku_core/topics/pubsub_topic,
+ ../../../../waku/waku_core/topics/content_topic,
+ ../../../alloc
type FilterMsgType* = enum
SUBSCRIBE
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim b/library/waku_thread_requests/requests/protocols/lightpush_request.nim
similarity index 85%
rename from library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim
rename to library/waku_thread_requests/requests/protocols/lightpush_request.nim
index f167cd239..bc3d9de2c 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim
+++ b/library/waku_thread_requests/requests/protocols/lightpush_request.nim
@@ -1,16 +1,16 @@
import options
import chronicles, chronos, results
import
- ../../../../../waku/waku_core/message/message,
- ../../../../../waku/waku_core/codecs,
- ../../../../../waku/factory/waku,
- ../../../../../waku/waku_core/message,
- ../../../../../waku/waku_core/time, # Timestamp
- ../../../../../waku/waku_core/topics/pubsub_topic,
- ../../../../../waku/waku_lightpush_legacy/client,
- ../../../../../waku/waku_lightpush_legacy/common,
- ../../../../../waku/node/peer_manager/peer_manager,
- ../../../../alloc
+ ../../../../waku/waku_core/message/message,
+ ../../../../waku/waku_core/codecs,
+ ../../../../waku/factory/waku,
+ ../../../../waku/waku_core/message,
+ ../../../../waku/waku_core/time, # Timestamp
+ ../../../../waku/waku_core/topics/pubsub_topic,
+ ../../../../waku/waku_lightpush_legacy/client,
+ ../../../../waku/waku_lightpush_legacy/common,
+ ../../../../waku/node/peer_manager/peer_manager,
+ ../../../alloc
type LightpushMsgType* = enum
PUBLISH
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread_requests/requests/protocols/relay_request.nim
similarity index 91%
rename from library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
rename to library/waku_thread_requests/requests/protocols/relay_request.nim
index cfff1442c..279a1efb4 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim
+++ b/library/waku_thread_requests/requests/protocols/relay_request.nim
@@ -1,16 +1,16 @@
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
- ../../../../../waku/waku_core/message/message,
- ../../../../../waku/factory/[external_config, validator_signed, waku],
- ../../../../../waku/waku_node,
- ../../../../../waku/waku_core/message,
- ../../../../../waku/waku_core/time, # Timestamp
- ../../../../../waku/waku_core/topics/pubsub_topic,
- ../../../../../waku/waku_core/topics,
- ../../../../../waku/waku_relay/protocol,
- ../../../../../waku/node/peer_manager,
- ../../../../alloc
+ ../../../../waku/waku_core/message/message,
+ ../../../../waku/factory/[external_config, validator_signed, waku],
+ ../../../../waku/waku_node,
+ ../../../../waku/waku_core/message,
+ ../../../../waku/waku_core/time, # Timestamp
+ ../../../../waku/waku_core/topics/pubsub_topic,
+ ../../../../waku/waku_core/topics,
+ ../../../../waku/waku_relay/protocol,
+ ../../../../waku/node/peer_manager,
+ ../../../alloc
type RelayMsgType* = enum
SUBSCRIBE
diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread_requests/requests/protocols/store_request.nim
similarity index 92%
rename from library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim
rename to library/waku_thread_requests/requests/protocols/store_request.nim
index 57786a581..3fe1e2f13 100644
--- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim
+++ b/library/waku_thread_requests/requests/protocols/store_request.nim
@@ -1,15 +1,15 @@
import std/[json, sugar, strutils, options]
import chronos, chronicles, results, stew/byteutils
import
- ../../../../../waku/factory/waku,
- ../../../../alloc,
- ../../../../utils,
- ../../../../../waku/waku_core/peers,
- ../../../../../waku/waku_core/time,
- ../../../../../waku/waku_core/message/digest,
- ../../../../../waku/waku_store/common,
- ../../../../../waku/waku_store/client,
- ../../../../../waku/common/paging
+ ../../../../waku/factory/waku,
+ ../../../alloc,
+ ../../../utils,
+ ../../../../waku/waku_core/peers,
+ ../../../../waku/waku_core/time,
+ ../../../../waku/waku_core/message/digest,
+ ../../../../waku/waku_store/common,
+ ../../../../waku/waku_store/client,
+ ../../../../waku/common/paging
type StoreReqType* = enum
REMOTE_QUERY ## to perform a query to another Store node
diff --git a/library/waku_thread/inter_thread_communication/waku_thread_request.nim b/library/waku_thread_requests/waku_thread_request.nim
similarity index 98%
rename from library/waku_thread/inter_thread_communication/waku_thread_request.nim
rename to library/waku_thread_requests/waku_thread_request.nim
index bcfb84198..50462fba7 100644
--- a/library/waku_thread/inter_thread_communication/waku_thread_request.nim
+++ b/library/waku_thread_requests/waku_thread_request.nim
@@ -5,8 +5,8 @@
import std/json, results
import chronos, chronos/threadsync
import
- ../../../waku/factory/waku,
- ../../ffi_types,
+ ../../waku/factory/waku,
+ ../ffi_types,
./requests/node_lifecycle_request,
./requests/peer_manager_request,
./requests/protocols/relay_request,
From 671a4f0ae25a6fe9b06832aa98882ae8f5727062 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Thu, 26 Jun 2025 15:41:45 +0200
Subject: [PATCH 35/47] fix: libwaku.so compilation (#3474)
---
waku.nimble | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/waku.nimble b/waku.nimble
index 2f9a73595..3790b0333 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -70,7 +70,7 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
- var lib_name = toDll("libwaku")
+ let lib_name = (when defined(windows): toDll(name) else: name & ".so")
when defined(windows):
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
From edf416f9e0f8a725e2245846711e858385648227 Mon Sep 17 00:00:00 2001
From: Simon-Pierre Vivier
Date: Thu, 26 Jun 2025 11:40:10 -0400
Subject: [PATCH 36/47] fix: remove waku sync broken dos mechanism (#3472)
---
waku/waku_store_sync/transfer.nim | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim
index f17fe944b..783cbffb6 100644
--- a/waku/waku_store_sync/transfer.nim
+++ b/waku/waku_store_sync/transfer.nim
@@ -143,9 +143,10 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
proc initProtocolHandler(self: SyncTransfer) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
while true:
- if not self.inSessions.contains(conn.peerId):
+      ## removed DoS protection until we can design something better
+ #[ if not self.inSessions.contains(conn.peerId):
error "unwanted peer, disconnecting", remote = conn.peerId
- break
+ break ]#
let readRes = catch:
await conn.readLp(int64(DefaultMaxWakuMessageSize))
From d820976eaf922c67e3e2f038674a8f0bc59209eb Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Fri, 27 Jun 2025 11:16:00 +0200
Subject: [PATCH 37/47] chore: improve keep alive (#3458)
---
apps/chat2/chat2.nim | 3 -
.../requests/peer_manager_request.nim | 9 +-
tests/test_waku_keepalive.nim | 5 +-
.../conf_builder/waku_conf_builder.nim | 3 -
waku/factory/external_config.nim | 7 -
waku/factory/node_factory.nim | 4 -
waku/factory/waku.nim | 3 +-
.../health_monitor/node_health_monitor.nim | 156 +++++++++++++++++-
waku/node/health_monitor/online_monitor.nim | 2 +-
waku/node/peer_manager/peer_manager.nim | 7 +
waku/node/waku_node.nim | 83 +++++++---
waku/waku_relay/protocol.nim | 9 +-
12 files changed, 233 insertions(+), 58 deletions(-)
diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim
index 1ba599d78..d18d35674 100644
--- a/apps/chat2/chat2.nim
+++ b/apps/chat2/chat2.nim
@@ -590,9 +590,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
await chat.readWriteLoop()
- if conf.keepAlive:
- node.startKeepalive()
-
runForever()
proc main(rng: ref HmacDrbgContext) {.async.} =
diff --git a/library/waku_thread_requests/requests/peer_manager_request.nim b/library/waku_thread_requests/requests/peer_manager_request.nim
index 1acc78595..a7e643a21 100644
--- a/library/waku_thread_requests/requests/peer_manager_request.nim
+++ b/library/waku_thread_requests/requests/peer_manager_request.nim
@@ -122,14 +122,7 @@ proc process*(
await waku.node.peerManager.disconnectNode(peerId)
return ok("")
of DISCONNECT_ALL_PEERS:
- let connectedPeers = waku.node.peerManager.switch.peerStore.peers().filterIt(
- it.connectedness == Connected
- )
-
- var futs: seq[Future[void]]
- for peer in connectedPeers:
- futs.add(waku.node.peerManager.disconnectNode(peer))
- await allFutures(futs)
+ await waku.node.peerManager.disconnectAllPeers()
return ok("")
of DIAL_PEER:
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim
index 3fcf01b8e..f6a9e631b 100644
--- a/tests/test_waku_keepalive.nim
+++ b/tests/test_waku_keepalive.nim
@@ -44,7 +44,10 @@ suite "Waku Keepalive":
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
- node1.startKeepalive(2.seconds)
+ let healthMonitor = NodeHealthMonitor()
+ healthMonitor.setNodeToHealthMonitor(node1)
+ healthMonitor.startKeepalive(2.seconds).isOkOr:
+ assert false, "Failed to start keepalive"
check:
(await completionFut.withTimeout(5.seconds)) == true
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index 46e303e70..5a3abbba4 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -255,9 +255,6 @@ proc withRelayShardedPeerManagement*(
) =
b.relayShardedPeerManagement = some(relayShardedPeerManagement)
-proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) =
- b.keepAlive = some(keepAlive)
-
proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
b.p2pReliability = some(p2pReliability)
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 190ce46e7..ecf57afd7 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -314,12 +314,6 @@ hence would have reachability issues.""",
name: "staticnode"
.}: seq[string]
- keepAlive* {.
- desc: "Enable keep-alive for idle connections: true|false",
- defaultValue: false,
- name: "keep-alive"
- .}: bool
-
# TODO: This is trying to do too much, this should only be used for autosharding, which itself should be configurable
# If numShardsInNetwork is not set, we use the number of shards configured as numShardsInNetwork
numShardsInNetwork* {.
@@ -951,7 +945,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withRelayPeerExchange(n.relayPeerExchange)
b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
b.withStaticNodes(n.staticNodes)
- b.withKeepAlive(n.keepAlive)
if n.numShardsInNetwork != 0:
b.withNumShardsInNetwork(n.numShardsInNetwork)
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 2c363c6c4..5298fa2b9 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -462,10 +462,6 @@ proc startNode*(
if conf.peerExchange and not conf.discv5Conf.isSome():
node.startPeerExchangeLoop()
- # Start keepalive, if enabled
- if conf.keepAlive:
- node.startKeepalive()
-
# Maintain relay connections
if conf.relay:
node.peerManager.start()
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index 2602120d8..faca627a4 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -401,7 +401,8 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
waku[].deliveryMonitor.startDeliveryMonitor()
## Health Monitor
- waku[].healthMonitor.startHealthMonitor()
+ waku[].healthMonitor.startHealthMonitor().isOkOr:
+ return err("failed to start health monitor: " & $error)
if conf.restServerConf.isSome():
rest_server_builder.startRestServerProtocolSupport(
diff --git a/waku/node/health_monitor/node_health_monitor.nim b/waku/node/health_monitor/node_health_monitor.nim
index b13925d66..fa31c0529 100644
--- a/waku/node/health_monitor/node_health_monitor.nim
+++ b/waku/node/health_monitor/node_health_monitor.nim
@@ -1,6 +1,10 @@
{.push raises: [].}
-import std/[options, sets, strformat], chronos, chronicles, libp2p/protocols/rendezvous
+import
+ std/[options, sets, strformat, random, sequtils],
+ chronos,
+ chronicles,
+ libp2p/protocols/rendezvous
import
../waku_node,
@@ -13,6 +17,10 @@ import
## This module is aimed to check the state of the "self" Waku Node
+# randomize initializes std/random's random number generator
+# if not called, the outcome of randomization procedures will be the same in every run
+randomize()
+
type
HealthReport* = object
nodeHealth*: HealthStatus
@@ -22,6 +30,7 @@ type
nodeHealth: HealthStatus
node: WakuNode
onlineMonitor*: OnlineMonitor
+ keepAliveFut: Future[void]
template checkWakuNodeNotNil(node: WakuNode, p: ProtocolHealth): untyped =
if node.isNil():
@@ -224,6 +233,145 @@ proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth =
return p.ready()
+proc selectRandomPeersForKeepalive(
+ node: WakuNode, outPeers: seq[PeerId], numRandomPeers: int
+): Future[seq[PeerId]] {.async.} =
+ ## Select peers for random keepalive, prioritizing mesh peers
+
+ if node.wakuRelay.isNil():
+ return selectRandomPeers(outPeers, numRandomPeers)
+
+ let meshPeers = node.wakuRelay.getPeersInMesh().valueOr:
+ error "Failed getting peers in mesh for ping", error = error
+ # Fallback to random selection from all outgoing peers
+ return selectRandomPeers(outPeers, numRandomPeers)
+
+ trace "Mesh peers for keepalive", meshPeers = meshPeers
+
+ # Get non-mesh peers and shuffle them
+ var nonMeshPeers = outPeers.filterIt(it notin meshPeers)
+ shuffle(nonMeshPeers)
+
+ # Combine mesh peers + random non-mesh peers up to numRandomPeers total
+ let numNonMeshPeers = max(0, numRandomPeers - len(meshPeers))
+ let selectedNonMeshPeers = nonMeshPeers[0 ..< min(len(nonMeshPeers), numNonMeshPeers)]
+
+ let selectedPeers = meshPeers & selectedNonMeshPeers
+ trace "Selected peers for keepalive", selected = selectedPeers
+ return selectedPeers
+
+proc keepAliveLoop(
+ node: WakuNode,
+ randomPeersKeepalive: chronos.Duration,
+ allPeersKeepAlive: chronos.Duration,
+ numRandomPeers = 10,
+) {.async.} =
+ # Calculate how many random peer cycles before pinging all peers
+ let randomToAllRatio =
+ int(allPeersKeepAlive.seconds() / randomPeersKeepalive.seconds())
+ var countdownToPingAll = max(0, randomToAllRatio - 1)
+
+ # Sleep detection configuration
+ let sleepDetectionInterval = 3 * randomPeersKeepalive
+
+ # Failure tracking
+ var consecutiveIterationFailures = 0
+ const maxAllowedConsecutiveFailures = 2
+
+ var lastTimeExecuted = Moment.now()
+
+ while true:
+ trace "Running keepalive loop"
+ await sleepAsync(randomPeersKeepalive)
+
+ if not node.started:
+ continue
+
+ let currentTime = Moment.now()
+
+ # Check for sleep detection
+ if currentTime - lastTimeExecuted > sleepDetectionInterval:
+ warn "Keep alive hasn't been executed recently. Killing all connections"
+ await node.peerManager.disconnectAllPeers()
+ lastTimeExecuted = currentTime
+ consecutiveIterationFailures = 0
+ continue
+
+ # Check for consecutive failures
+ if consecutiveIterationFailures > maxAllowedConsecutiveFailures:
+ warn "Too many consecutive ping failures, node likely disconnected. Killing all connections",
+ consecutiveIterationFailures, maxAllowedConsecutiveFailures
+ await node.peerManager.disconnectAllPeers()
+ consecutiveIterationFailures = 0
+ lastTimeExecuted = currentTime
+ continue
+
+ # Determine which peers to ping
+ let outPeers = node.peerManager.connectedPeers()[1]
+ let peersToPing =
+ if countdownToPingAll > 0:
+ await selectRandomPeersForKeepalive(node, outPeers, numRandomPeers)
+ else:
+ outPeers
+
+ let numPeersToPing = len(peersToPing)
+
+ if countdownToPingAll > 0:
+ trace "Pinging random peers",
+ count = numPeersToPing, countdownToPingAll = countdownToPingAll
+ countdownToPingAll.dec()
+ else:
+ trace "Pinging all peers", count = numPeersToPing
+ countdownToPingAll = max(0, randomToAllRatio - 1)
+
+ # Execute keepalive pings
+ let successfulPings = await parallelPings(node, peersToPing)
+
+ if successfulPings != numPeersToPing:
+ waku_node_errors.inc(
+ amount = numPeersToPing - successfulPings, labelValues = ["keep_alive_failure"]
+ )
+
+ trace "Keepalive results",
+ attemptedPings = numPeersToPing, successfulPings = successfulPings
+
+ # Update failure tracking
+ if numPeersToPing > 0 and successfulPings == 0:
+ consecutiveIterationFailures.inc()
+ error "All pings failed", consecutiveFailures = consecutiveIterationFailures
+ else:
+ consecutiveIterationFailures = 0
+
+ lastTimeExecuted = currentTime
+
+# 2 minutes default - 20% of the default chronosstream timeout duration
+proc startKeepalive*(
+ hm: NodeHealthMonitor,
+ randomPeersKeepalive = 10.seconds,
+ allPeersKeepalive = 2.minutes,
+): Result[void, string] =
+ # Validate input parameters
+ if randomPeersKeepalive.isZero() or allPeersKeepAlive.isZero():
+ error "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0",
+ randomPeersKeepalive = $randomPeersKeepalive,
+ allPeersKeepAlive = $allPeersKeepAlive
+ return err(
+ "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0"
+ )
+
+ if allPeersKeepAlive < randomPeersKeepalive:
+ error "startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive",
+ allPeersKeepAlive = $allPeersKeepAlive,
+ randomPeersKeepalive = $randomPeersKeepalive
+ return
+ err("startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive")
+
+ info "starting keepalive",
+ randomPeersKeepalive = randomPeersKeepalive, allPeersKeepalive = allPeersKeepalive
+
+ hm.keepAliveFut = hm.node.keepAliveLoop(randomPeersKeepalive, allPeersKeepalive)
+ return ok()
+
proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} =
var report: HealthReport
report.nodeHealth = hm.nodeHealth
@@ -253,11 +401,15 @@ proc setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) =
proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) =
hm.nodeHealth = health
-proc startHealthMonitor*(hm: NodeHealthMonitor) =
+proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] =
hm.onlineMonitor.startOnlineMonitor()
+ hm.startKeepalive().isOkOr:
+ return err("startHealthMonitor: failed starting keep alive: " & error)
+ return ok()
proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} =
await hm.onlineMonitor.stopOnlineMonitor()
+ await hm.keepAliveFut.cancelAndWait()
proc new*(
T: type NodeHealthMonitor,
diff --git a/waku/node/health_monitor/online_monitor.nim b/waku/node/health_monitor/online_monitor.nim
index f3a3013e2..27bd53bc3 100644
--- a/waku/node/health_monitor/online_monitor.nim
+++ b/waku/node/health_monitor/online_monitor.nim
@@ -53,7 +53,7 @@ proc networkConnectivityLoop(self: OnlineMonitor): Future[void] {.async.} =
## and triggers any change that depends on the network connectivity state
while true:
await self.updateOnlineState()
- await sleepAsync(15.seconds)
+ await sleepAsync(5.seconds)
proc startOnlineMonitor*(self: OnlineMonitor) =
self.networkConnLoopHandle = self.networkConnectivityLoop()
diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim
index 0a19d5b2c..7deff0593 100644
--- a/waku/node/peer_manager/peer_manager.nim
+++ b/waku/node/peer_manager/peer_manager.nim
@@ -501,6 +501,13 @@ proc connectedPeers*(
return (inPeers, outPeers)
+proc disconnectAllPeers*(pm: PeerManager) {.async.} =
+ let (inPeerIds, outPeerIds) = pm.connectedPeers()
+ let connectedPeers = concat(inPeerIds, outPeerIds)
+
+ let futs = connectedPeers.mapIt(pm.disconnectNode(it))
+ await allFutures(futs)
+
proc getStreamByPeerIdAndProtocol*(
pm: PeerManager, peerId: PeerId, protocol: string
): Future[Result[Connection, string]] {.async.} =
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index ac72f3e37..6a5c3fdb0 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -1,7 +1,7 @@
{.push raises: [].}
import
- std/[hashes, options, sugar, tables, strutils, sequtils, os, net],
+ std/[hashes, options, sugar, tables, strutils, sequtils, os, net, random],
chronos,
chronicles,
metrics,
@@ -69,6 +69,10 @@ declarePublicGauge waku_px_peers,
logScope:
topics = "waku node"
+# randomize initializes std/random's random number generator
+# if not called, the outcome of randomization procedures will be the same in every run
+randomize()
+
# TODO: Move to application instance (e.g., `WakuNode2`)
# Git version in git describe format (defined compile time)
const git_version* {.strdefine.} = "n/a"
@@ -1325,35 +1329,60 @@ proc mountLibp2pPing*(node: WakuNode) {.async: (raises: []).} =
except LPError:
error "failed to mount libp2pPing", error = getCurrentExceptionMsg()
-# TODO: Move this logic to PeerManager
-proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
- while true:
- await sleepAsync(keepalive)
- if not node.started:
+proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.async.} =
+ ## Ping a single peer and return the result
+
+ try:
+ # Establish a stream
+ let stream = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
+ error "pingPeer: failed dialing peer", peerId = peerId
+ return err("pingPeer failed dialing peer peerId: " & $peerId)
+ defer:
+ # Always close the stream
+ try:
+ await stream.close()
+ except CatchableError as e:
+ debug "Error closing ping connection", peerId = peerId, error = e.msg
+
+ # Perform ping
+ let pingDuration = await node.libp2pPing.ping(stream)
+
+ trace "Ping successful", peerId = peerId, duration = pingDuration
+ return ok()
+ except CatchableError as e:
+ error "pingPeer: exception raised pinging peer", peerId = peerId, error = e.msg
+ return err("pingPeer: exception raised pinging peer: " & e.msg)
+
+proc selectRandomPeers*(peers: seq[PeerId], numRandomPeers: int): seq[PeerId] =
+ var randomPeers = peers
+ shuffle(randomPeers)
+ return randomPeers[0 ..< min(len(randomPeers), numRandomPeers)]
+
+# Returns the number of successful pings performed
+proc parallelPings*(node: WakuNode, peerIds: seq[PeerId]): Future[int] {.async.} =
+ if len(peerIds) == 0:
+ return 0
+
+ var pingFuts: seq[Future[Result[void, string]]]
+
+ # Create ping futures for each peer
+ for i, peerId in peerIds:
+ let fut = pingPeer(node, peerId)
+ pingFuts.add(fut)
+
+ # Wait for all pings to complete
+ discard await allFutures(pingFuts).withTimeout(5.seconds)
+
+ var successCount = 0
+ for fut in pingFuts:
+ if not fut.completed() or fut.failed():
continue
- # Keep connected peers alive while running
- # Each node is responsible of keeping its outgoing connections alive
- trace "Running keepalive"
+ let res = fut.read()
+ if res.isOk():
+ successCount.inc()
- # First get a list of connected peer infos
- let outPeers = node.peerManager.connectedPeers()[1]
-
- for peerId in outPeers:
- try:
- let conn = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
- warn "Failed dialing peer for keep alive", peerId = peerId
- continue
- let pingDelay = await node.libp2pPing.ping(conn)
- await conn.close()
- except CatchableError as exc:
- waku_node_errors.inc(labelValues = ["keep_alive_failure"])
-
-# 2 minutes default - 20% of the default chronosstream timeout duration
-proc startKeepalive*(node: WakuNode, keepalive = 2.minutes) =
- info "starting keepalive", keepalive = keepalive
-
- asyncSpawn node.keepaliveLoop(keepalive)
+ return successCount
proc mountRendezvous*(node: WakuNode) {.async: (raises: []).} =
info "mounting rendezvous discovery protocol"
diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim
index c87519b06..18d60dcef 100644
--- a/waku/waku_relay/protocol.nim
+++ b/waku/waku_relay/protocol.nim
@@ -332,6 +332,13 @@ proc getPubSubPeersInMesh*(
## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic.
## The 'mesh' atribute is defined in the GossipSub ref object.
+ # If pubsubTopic is empty, we return all peers in mesh for any pubsub topic
+ if pubsubTopic == "":
+ var allPeers = initHashSet[PubSubPeer]()
+ for topic, topicMesh in w.mesh.pairs:
+ allPeers = allPeers.union(topicMesh)
+ return ok(allPeers)
+
if not w.mesh.hasKey(pubsubTopic):
debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
pubsubTopic = pubsubTopic
@@ -348,7 +355,7 @@ proc getPubSubPeersInMesh*(
return ok(peers)
proc getPeersInMesh*(
- w: WakuRelay, pubsubTopic: PubsubTopic
+ w: WakuRelay, pubsubTopic: PubsubTopic = ""
): Result[seq[PeerId], string] =
## Returns the list of peerIds in a mesh defined by the passed pubsub topic.
## The 'mesh' atribute is defined in the GossipSub ref object.
From bed5c9ab527fe61c8e39a41d6c3464de64d06d6a Mon Sep 17 00:00:00 2001
From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Date: Mon, 30 Jun 2025 17:11:38 +0200
Subject: [PATCH 38/47] Fix legacy lightpush diagnostic log (#3478)
The DST team needs the unintentionally removed my_peer_id field restored in the legacy lightpush log for their analysis tool.
---
waku/waku_lightpush_legacy/protocol.nim | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim
index feb6e17dc..75bededaf 100644
--- a/waku/waku_lightpush_legacy/protocol.nim
+++ b/waku/waku_lightpush_legacy/protocol.nim
@@ -45,7 +45,8 @@ proc handleRequest*(
let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex()
waku_lightpush_messages.inc(labelValues = ["PushRequest"])
- notice "handling lightpush request",
+ notice "handling legacy lightpush request",
+ my_peer_id = wl.peerManager.switch.peerInfo.peerId,
peer_id = peerId,
requestId = requestId,
pubsubTopic = pubsubTopic,
From cc30666016c6b8e0083a4eb9616b6ea6d9d543c7 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Tue, 1 Jul 2025 21:14:21 +0200
Subject: [PATCH 39/47] fix: removing keepAlive from wakuConf (#3481)
---
waku/factory/conf_builder/waku_conf_builder.nim | 2 --
waku/factory/waku_conf.nim | 1 -
2 files changed, 3 deletions(-)
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index 5a3abbba4..0b9ca0d88 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -121,7 +121,6 @@ type WakuConfBuilder* = object
relayShardedPeerManagement: Option[bool]
relayServiceRatio: Option[string]
circuitRelayClient: Option[bool]
- keepAlive: Option[bool]
p2pReliability: Option[bool]
proc init*(T: type WakuConfBuilder): WakuConfBuilder =
@@ -622,7 +621,6 @@ proc build*(
relayServiceRatio: builder.relayServiceRatio.get("60:40"),
rateLimits: rateLimits,
circuitRelayClient: builder.circuitRelayClient.get(false),
- keepAlive: builder.keepAlive.get(true),
staticNodes: builder.staticNodes,
relayShardedPeerManagement: relayShardedPeerManagement,
p2pReliability: builder.p2pReliability.get(false),
diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim
index caf925ff2..584c60f33 100644
--- a/waku/factory/waku_conf.nim
+++ b/waku/factory/waku_conf.nim
@@ -83,7 +83,6 @@ type WakuConf* {.requiresInit.} = ref object
relayPeerExchange*: bool
rendezvous*: bool
circuitRelayClient*: bool
- keepAlive*: bool
discv5Conf*: Option[Discv5Conf]
dnsDiscoveryConf*: Option[DnsDiscoveryConf]
From ac094eae38054650d1878575b5b1417786328eb2 Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Wed, 2 Jul 2025 17:22:51 +0200
Subject: [PATCH 40/47] chore: not supporting legacy store by default (#3484)
---
waku/factory/conf_builder/store_service_conf_builder.nim | 2 +-
waku/factory/external_config.nim | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/waku/factory/conf_builder/store_service_conf_builder.nim b/waku/factory/conf_builder/store_service_conf_builder.nim
index d12bc8150..d5d48c34d 100644
--- a/waku/factory/conf_builder/store_service_conf_builder.nim
+++ b/waku/factory/conf_builder/store_service_conf_builder.nim
@@ -64,7 +64,7 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string
dbMigration: b.dbMigration.get(true),
dbURl: b.dbUrl.get(),
dbVacuum: b.dbVacuum.get(false),
- supportV2: b.supportV2.get(true),
+ supportV2: b.supportV2.get(false),
maxNumDbConnections: b.maxNumDbConnections.get(50),
retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
resume: b.resume.get(false),
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index ecf57afd7..4e71783c5 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -351,7 +351,7 @@ hence would have reachability issues.""",
legacyStore* {.
desc: "Enable/disable support of Waku Store v2 as a service",
- defaultValue: true,
+ defaultValue: false,
name: "legacy-store"
.}: bool
From 4b186a4b285779222a33290698fff47616300156 Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Thu, 3 Jul 2025 11:56:43 +1000
Subject: [PATCH 41/47] fix: deprecate --dns-discovery (#3485)
* fix: deprecate `--dns-discovery`
Properly deprecates the `--dns-discovery` CLI arg.
DNS Discovery is now enabled whenever a non-empty DNS Discovery URL is passed (a hedged sketch of this rule follows below).
* test: add test_all for factory
Add and use a test_all entry point for some test suites.
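As referenced above, a minimal hedged sketch of the new enablement rule; the proc and
type names here are illustrative, and the authoritative logic is the
dns_discovery_conf_builder.nim change in the diff below:

  import std/[options, strutils]

  type DnsDiscoveryConf = object
    enrTreeUrl: string

  proc dnsDiscoveryFromCli(dnsDiscoveryUrl: string): Option[DnsDiscoveryConf] =
    # The feature is on exactly when a non-empty URL is supplied; the dedicated
    # --dns-discovery flag no longer decides this.
    if dnsDiscoveryUrl.isEmptyOrWhitespace():
      return none(DnsDiscoveryConf)
    some(DnsDiscoveryConf(enrTreeUrl: dnsDiscoveryUrl))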
---
tests/all_tests_waku.nim | 15 ++-------------
tests/factory/test_all.nim | 1 +
tests/wakunode_rest/test_all.nim | 11 ++++++-----
.../conf_builder/dns_discovery_conf_builder.nim | 12 ++++--------
waku/factory/external_config.nim | 7 ++++---
5 files changed, 17 insertions(+), 29 deletions(-)
create mode 100644 tests/factory/test_all.nim
diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim
index 2723fac8f..98ea0e36f 100644
--- a/tests/all_tests_waku.nim
+++ b/tests/all_tests_waku.nim
@@ -92,20 +92,9 @@ import
import ./test_waku_keystore_keyfile, ./test_waku_keystore
## Wakunode Rest API test suite
-import
- ./wakunode_rest/test_rest_debug,
- ./wakunode_rest/test_rest_debug_serdes,
- ./wakunode_rest/test_rest_relay,
- ./wakunode_rest/test_rest_relay_serdes,
- ./wakunode_rest/test_rest_serdes,
- ./wakunode_rest/test_rest_filter,
- ./wakunode_rest/test_rest_lightpush,
- ./wakunode_rest/test_rest_lightpush_legacy,
- ./wakunode_rest/test_rest_admin,
- ./wakunode_rest/test_rest_cors,
- ./wakunode_rest/test_rest_health
+import ./wakunode_rest/test_all
import ./waku_rln_relay/test_all
# Node Factory
-import ./factory/[test_external_config, test_node_factory, test_waku_conf]
+import ./factory/test_all
diff --git a/tests/factory/test_all.nim b/tests/factory/test_all.nim
new file mode 100644
index 000000000..b704a8ef3
--- /dev/null
+++ b/tests/factory/test_all.nim
@@ -0,0 +1 @@
+import ./test_external_config, ./test_node_factory, ./test_waku_conf
diff --git a/tests/wakunode_rest/test_all.nim b/tests/wakunode_rest/test_all.nim
index 6e34b6fdd..4071e635b 100644
--- a/tests/wakunode_rest/test_all.nim
+++ b/tests/wakunode_rest/test_all.nim
@@ -1,14 +1,15 @@
{.used.}
import
- ./test_rest_debug_serdes,
+ ./test_rest_admin,
+ ./test_rest_cors,
./test_rest_debug,
+ ./test_rest_debug_serdes,
./test_rest_filter,
- ./test_rest_lightpush_legacy,
./test_rest_health,
+ ./test_rest_lightpush,
+ ./test_rest_lightpush_legacy,
./test_rest_relay_serdes,
./test_rest_relay,
./test_rest_serdes,
- ./test_rest_store,
- ./test_rest_admin,
- ./test_rest_cors
+ ./test_rest_store
diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
index 34337c9b1..1c577bbf8 100644
--- a/waku/factory/conf_builder/dns_discovery_conf_builder.nim
+++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
@@ -1,4 +1,4 @@
-import chronicles, std/[net, options, sequtils], results
+import chronicles, std/[net, options, strutils], results
import ../waku_conf
logScope:
@@ -8,16 +8,12 @@ logScope:
## DNS Discovery Config Builder ##
##################################
type DnsDiscoveryConfBuilder* = object
- enabled*: Option[bool]
enrTreeUrl*: Option[string]
nameServers*: seq[IpAddress]
proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder =
DnsDiscoveryConfBuilder()
-proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
- b.enabled = some(enabled)
-
proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
b.enrTreeUrl = some(enrTreeUrl)
@@ -25,13 +21,13 @@ proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress
b.nameServers = nameServers
proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
- if not b.enabled.get(false):
+ if b.enrTreeUrl.isNone():
return ok(none(DnsDiscoveryConf))
+ if isEmptyOrWhiteSpace(b.enrTreeUrl.get()):
+ return err("dnsDiscovery.enrTreeUrl cannot be an empty string")
if b.nameServers.len == 0:
return err("dnsDiscovery.nameServers is not specified")
- if b.enrTreeUrl.isNone():
- return err("dnsDiscovery.enrTreeUrl is not specified")
return ok(
some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get()))
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 4e71783c5..704c6d4e5 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -550,7 +550,8 @@ with the drawback of consuming some more bandwidth.""",
.}: bool
dnsDiscoveryUrl* {.
-    desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
+    desc:
+      "URL for DNS node list in format 'enrtree://<key>@<fqdn>', enables DNS Discovery",
defaultValue: "",
name: "dns-discovery-url"
.}: string
@@ -996,8 +997,8 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.metricsServerConf.withHttpPort(n.metricsServerPort)
b.metricsServerConf.withLogging(n.metricsLogging)
- b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
- b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
+ if n.dnsDiscoveryUrl != "":
+ b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
b.dnsDiscoveryConf.withNameServers(n.dnsAddrsNameServers)
if n.discv5Discovery.isSome():
From 0ed3fc8079e1c8051fdedaab73e9192266418505 Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Thu, 3 Jul 2025 12:29:16 +1000
Subject: [PATCH 42/47] fix: lightpush metrics (#3486)
* fix: lightpush metrics
Some lightpush errors were not reported in metrics due to
an early return.
* Small improvements
* Bound the metrics label set by using error codes
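Labelling the error counter by status code keeps the label set bounded, whereas
labelling by free-form status description would create a new time series per distinct
error string. A minimal hedged sketch of the pattern with nim-metrics (the counter and
type names are illustrative, not the ones declared in waku_lightpush):

  import metrics

  declarePublicCounter example_lightpush_errors,
    "lightpush errors by status code", ["statusCode"]

  type ExampleStatusCode = enum
    BAD_REQUEST = 400
    INTERNAL_SERVER_ERROR = 500

  proc recordError(code: ExampleStatusCode) =
    # Label on the small, fixed set of status codes, never on the error
    # description, so the number of label values stays bounded.
    example_lightpush_errors.inc(labelValues = [$code.uint32])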
---
waku/waku_lightpush/protocol.nim | 146 ++++++++++++++-----------------
1 file changed, 66 insertions(+), 80 deletions(-)
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index 1165cbb52..7831fb20f 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -28,94 +28,80 @@ type WakuLightPush* = ref object of LPProtocol
requestRateLimiter*: RequestRateLimiter
sharding: Sharding
+proc handleRequest(
+ wl: WakuLightPush, peerId: PeerId, pushRequest: LightPushRequest
+): Future[WakuLightPushResult] {.async.} =
+ let pubsubTopic = pushRequest.pubSubTopic.valueOr:
+ let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
+ let msg = "Invalid content-topic:" & $error
+ error "lightpush request handling error", error = msg
+ return WakuLightPushResult.err(
+ (code: LightPushStatusCode.INVALID_MESSAGE_ERROR, desc: some(msg))
+ )
+
+ wl.sharding.getShard(parsedTopic).valueOr:
+ let msg = "Sharding error: " & error
+ error "lightpush request handling error", error = msg
+ return WakuLightPushResult.err(
+ (code: LightPushStatusCode.INTERNAL_SERVER_ERROR, desc: some(msg))
+ )
+
+ # ensure checking topic will not cause error at gossipsub level
+ if pubsubTopic.isEmptyOrWhitespace():
+ let msg = "topic must not be empty"
+ error "lightpush request handling error", error = msg
+ return
+ WakuLightPushResult.err((code: LightPushStatusCode.BAD_REQUEST, desc: some(msg)))
+
+ waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])
+
+ let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex()
+ notice "handling lightpush request",
+ my_peer_id = wl.peerManager.switch.peerInfo.peerId,
+ peer_id = peerId,
+ requestId = pushRequest.requestId,
+ pubsubTopic = pushRequest.pubsubTopic,
+ msg_hash = msg_hash,
+ receivedTime = getNowInNanosecondTime()
+
+ let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr:
+ return err((code: error.code, desc: error.desc))
+ return ok(res)
+
proc handleRequest*(
wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]
): Future[LightPushResponse] {.async.} =
- let reqDecodeRes = LightpushRequest.decode(buffer)
- var isSuccess = false
- var pushResponse: LightpushResponse
+ var pushResponse: LightPushResponse
- if reqDecodeRes.isErr():
- pushResponse = LightpushResponse(
+ let pushRequest = LightPushRequest.decode(buffer).valueOr:
+ let desc = decodeRpcFailure & ": " & $error
+ error "failed to push message", error = desc
+ let errorCode = LightPushStatusCode.BAD_REQUEST.uint32
+ waku_lightpush_v3_errors.inc(labelValues = [$errorCode])
+ return LightPushResponse(
requestId: "N/A", # due to decode failure we don't know requestId
- statusCode: LightpushStatusCode.BAD_REQUEST.uint32,
- statusDesc: some(decodeRpcFailure & ": " & $reqDecodeRes.error),
- )
- else:
- let pushRequest = reqDecodeRes.get()
-
- let pubsubTopic = pushRequest.pubSubTopic.valueOr:
- let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
- let msg = "Invalid content-topic:" & $error
- error "lightpush request handling error", error = msg
- return LightpushResponse(
- requestId: pushRequest.requestId,
- statusCode: LightpushStatusCode.INVALID_MESSAGE_ERROR.uint32,
- statusDesc: some(msg),
- )
-
- wl.sharding.getShard(parsedTopic).valueOr:
- let msg = "Autosharding error: " & error
- error "lightpush request handling error", error = msg
- return LightpushResponse(
- requestId: pushRequest.requestId,
- statusCode: LightpushStatusCode.INTERNAL_SERVER_ERROR.uint32,
- statusDesc: some(msg),
- )
-
- # ensure checking topic will not cause error at gossipsub level
- if pubsubTopic.isEmptyOrWhitespace():
- let msg = "topic must not be empty"
- error "lightpush request handling error", error = msg
- return LightPushResponse(
- requestId: pushRequest.requestId,
- statusCode: LightpushStatusCode.BAD_REQUEST.uint32,
- statusDesc: some(msg),
- )
-
- waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])
-
- let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex()
- notice "handling lightpush request",
- my_peer_id = wl.peerManager.switch.peerInfo.peerId,
- peer_id = peerId,
- requestId = pushRequest.requestId,
- pubsubTopic = pushRequest.pubsubTopic,
- msg_hash = msg_hash,
- receivedTime = getNowInNanosecondTime()
-
- let handleRes = await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)
-
- isSuccess = handleRes.isOk()
- pushResponse = LightpushResponse(
- requestId: pushRequest.requestId,
- statusCode:
- if isSuccess:
- LightpushStatusCode.SUCCESS.uint32
- else:
- handleRes.error.code.uint32,
- statusDesc:
- if isSuccess:
- none[string]()
- else:
- handleRes.error.desc,
- relayPeerCount:
- if isSuccess:
- some(handleRes.get())
- else:
- none[uint32](),
+ statusCode: errorCode.uint32,
+ statusDesc: some(desc),
)
- if not isSuccess:
- waku_lightpush_v3_errors.inc(
- labelValues = [pushResponse.statusDesc.valueOr("unknown")]
+ let relayPeerCount = (await handleRequest(wl, peerId, pushRequest)).valueOr:
+ let desc = error.desc
+ waku_lightpush_v3_errors.inc(labelValues = [$error.code])
+ error "failed to push message", error = desc
+ return LightPushResponse(
+ requestId: pushRequest.requestId, statusCode: error.code.uint32, statusDesc: desc
)
- error "failed to push message", error = pushResponse.statusDesc
- return pushResponse
+
+ return LightPushResponse(
+ requestId: pushRequest.requestId,
+ statusCode: LightPushStatusCode.SUCCESS.uint32,
+ statusDesc: none[string](),
+ relayPeerCount: some(relayPeerCount),
+ )
proc initProtocolHandler(wl: WakuLightPush) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
- var rpc: LightpushResponse
+ var rpc: LightPushResponse
wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn):
var buffer: seq[byte]
try:
@@ -137,7 +123,7 @@ proc initProtocolHandler(wl: WakuLightPush) =
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting
rpc = static(
- LightpushResponse(
+ LightPushResponse(
## We will not copy and decode RPC buffer from stream only for requestId
## in reject case as it is comparably too expensive and opens possible
## attack surface
@@ -152,8 +138,8 @@ proc initProtocolHandler(wl: WakuLightPush) =
except LPStreamError:
error "lightpush write stream failed", error = getCurrentExceptionMsg()
- ## For lightpush might not worth to measure outgoing trafic as it is only
- ## small respones about success/failure
+      ## For lightpush it is likely not worth measuring outgoing traffic, as it is
+      ## only a small response about success/failure
wl.handler = handler
wl.codec = WakuLightPushCodec
From 994d485b49f60941772bdd4e9dc47eb3b812238f Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Fri, 4 Jul 2025 17:10:53 +1000
Subject: [PATCH 43/47] chore!: make sharding configuration explicit (#3468)
* Reserve `networkconfig` name to waku network related settings
* Rename cluster conf to network conf
A `NetworkConf` is a Waku network configuration.
# Conflicts:
# tests/factory/test_waku_conf.nim
* Improve sharding configuration
A smarter data type simplifies the logic.
* Fixing tests
* fixup! rename to endpointConf
* wip: autosharding is a specific configuration state, so treat it like one
# Conflicts:
# waku/factory/external_config.nim
* refactor lightpush handler
some metrics error reporting was missing
# Conflicts:
# waku/waku_lightpush/protocol.nim
* test_node_factory tests pass
* remove warnings
* fix tests
* Revert the previous eager replace-all command
* fix up build tools compilation
* metadata is used to store cluster id
* Mount relay routes in static sharding
* Rename activeRelayShards to subscribeShards
To make it clearer that these are the shards the node will subscribe to.
* Remove unused msg var
* Improve error handling
* Set autosharding as default, with 1 shard in network
Also defaults the subscribed shards to all shards in auto sharding, and to none in
static sharding.
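For reference, a hedged sketch of what the explicit sharding configuration could look
like; the type and field names are inferred from conf.shardingConf.kind,
conf.shardingConf.numShardsInCluster and conf.subscribeShards used in the tests below
and may differ from the actual definitions in waku/factory:

  type
    ShardingConfKind = enum
      AutoSharding
      StaticSharding

    ShardingConf = object
      case kind: ShardingConfKind
      of AutoSharding:
        numShardsInCluster: uint16
      of StaticSharding:
        discard

  proc defaultSubscribeShards(conf: ShardingConf): seq[uint16] =
    ## Default subscription: every shard under auto sharding, none under
    ## static sharding, matching the defaults described above.
    case conf.kind
    of AutoSharding:
      for shard in 0 ..< int(conf.numShardsInCluster):
        result.add(uint16(shard))
    of StaticSharding:
      result = @[]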
---
apps/networkmonitor/networkmonitor.nim | 17 +-
examples/wakustealthcommitments/node_spec.nim | 24 +--
tests/factory/test_external_config.nim | 84 +++++++--
tests/factory/test_waku_conf.nim | 118 +++++++------
tests/test_peer_manager.nim | 6 +-
tests/test_waku_netconfig.nim | 84 ++++-----
tests/testlib/wakunode.nim | 8 +-
tests/waku_discv5/test_waku_discv5.nim | 4 +-
tests/waku_lightpush/lightpush_utils.nim | 6 +-
tests/waku_relay/test_wakunode_relay.nim | 11 +-
tests/wakunode2/test_app.nim | 2 +-
tests/wakunode_rest/test_rest_admin.nim | 12 +-
tests/wakunode_rest/test_rest_relay.nim | 10 +-
.../conf_builder/waku_conf_builder.nim | 160 ++++++++++--------
.../conf_builder/web_socket_conf_builder.nim | 2 +-
waku/factory/external_config.nim | 47 +++--
waku/factory/internal_config.nim | 20 +--
waku/factory/networks_config.nim | 50 ++++--
waku/factory/node_factory.nim | 42 +++--
waku/factory/waku.nim | 15 +-
waku/factory/waku_conf.nim | 44 ++---
waku/node/waku_node.nim | 84 ++++++---
waku/waku_api/rest/admin/handlers.nim | 21 ++-
waku/waku_api/rest/builder.nim | 20 ++-
waku/waku_api/rest/relay/handlers.nim | 9 +-
waku/waku_core/topics/content_topic.nim | 12 ++
waku/waku_core/topics/sharding.nim | 57 +++----
waku/waku_lightpush/protocol.nim | 19 ++-
waku/waku_metadata/protocol.nim | 2 +-
29 files changed, 578 insertions(+), 412 deletions(-)
diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim
index f8cde5281..f391b3d20 100644
--- a/apps/networkmonitor/networkmonitor.nim
+++ b/apps/networkmonitor/networkmonitor.nim
@@ -570,17 +570,18 @@ when isMainModule:
info "cli flags", conf = conf
if conf.clusterId == 1:
- let twnClusterConf = ClusterConf.TheWakuNetworkConf()
+ let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
- conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes
- conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
- conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
- conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
- conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
- conf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
+ conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes
+ conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
+ conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
+ conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
+ conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
+ conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster
if conf.shards.len == 0:
- conf.shards = toSeq(uint16(0) .. uint16(twnClusterConf.numShardsInNetwork - 1))
+ conf.shards =
+ toSeq(uint16(0) .. uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1))
if conf.logLevel != LogLevel.NONE:
setLogLevel(conf.logLevel)
diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim
index fdcd36986..c3468ccde 100644
--- a/examples/wakustealthcommitments/node_spec.nim
+++ b/examples/wakustealthcommitments/node_spec.nim
@@ -24,26 +24,26 @@ proc setup*(): Waku =
var conf = confRes.get()
- let twnClusterConf = ClusterConf.TheWakuNetworkConf()
+ let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
if len(conf.shards) != 0:
- conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16])
+ conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
else:
- conf.pubsubTopics = twnClusterConf.pubsubTopics
+ conf.pubsubTopics = twnNetworkConf.pubsubTopics
# Override configuration
- conf.maxMessageSize = twnClusterConf.maxMessageSize
- conf.clusterId = twnClusterConf.clusterId
- conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
- conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
- conf.discv5Discovery = twnClusterConf.discv5Discovery
+ conf.maxMessageSize = twnNetworkConf.maxMessageSize
+ conf.clusterId = twnNetworkConf.clusterId
+ conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
+ conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
+ conf.discv5Discovery = twnNetworkConf.discv5Discovery
conf.discv5BootstrapNodes =
- conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
- conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
- conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
+ conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes
+ conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
+ conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
# Only set rlnRelay to true if relay is configured
if conf.relay:
- conf.rlnRelay = twnClusterConf.rlnRelay
+ conf.rlnRelay = twnNetworkConf.rlnRelay
debug "Starting node"
var waku = Waku.new(conf).valueOr:
diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim
index 927246b0d..ecd77826f 100644
--- a/tests/factory/test_external_config.nim
+++ b/tests/factory/test_external_config.nim
@@ -17,10 +17,46 @@ import
../../waku/common/logging,
../../waku/common/utils/parse_size_units
-suite "Waku config - apply preset":
- test "Default preset is TWN":
+suite "Waku external config - default values":
+ test "Default sharding value":
## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
+ let defaultShardingMode = AutoSharding
+ let defaultNumShardsInCluster = 1.uint16
+ let defaultSubscribeShards = @[0.uint16]
+
+ ## Given
+ let preConfig = defaultWakuNodeConf().get()
+
+ ## When
+ let res = preConfig.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let conf = res.get()
+ check conf.shardingConf.kind == defaultShardingMode
+ check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster
+ check conf.subscribeShards == defaultSubscribeShards
+
+ test "Default shards value in static sharding":
+ ## Setup
+ let defaultSubscribeShards: seq[uint16] = @[]
+
+ ## Given
+ var preConfig = defaultWakuNodeConf().get()
+ preConfig.numShardsInNetwork = 0.uint16
+
+ ## When
+ let res = preConfig.toWakuConf()
+ assert res.isOk(), $res.error
+
+ ## Then
+ let conf = res.get()
+ check conf.subscribeShards == defaultSubscribeShards
+
+suite "Waku external config - apply preset":
+ test "Preset is TWN":
+ ## Setup
+ let expectedConf = NetworkConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
@@ -48,7 +84,9 @@ suite "Waku config - apply preset":
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
- check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
+ check conf.shardingConf.kind == expectedConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ expectedConf.shardingConf.numShardsInCluster
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
@@ -56,7 +94,7 @@ suite "Waku config - apply preset":
test "Subscribes to all valid shards in twn":
## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
+ let expectedConf = NetworkConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
@@ -68,11 +106,11 @@ suite "Waku config - apply preset":
## Then
let conf = res.get()
- check conf.shards.len == expectedConf.numShardsInNetwork.int
+ check conf.subscribeShards.len == expectedConf.shardingConf.numShardsInCluster.int
test "Subscribes to some valid shards in twn":
## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
+ let expectedConf = NetworkConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 4, 7]
@@ -84,9 +122,9 @@ suite "Waku config - apply preset":
## Then
let conf = resConf.get()
- assert conf.shards.len() == shards.len()
+ assert conf.subscribeShards.len() == shards.len()
for index, shard in shards:
- assert shard in conf.shards
+ assert shard in conf.subscribeShards
test "Subscribes to invalid shards in twn":
## Setup
@@ -103,7 +141,7 @@ suite "Waku config - apply preset":
test "Apply TWN preset when cluster id = 1":
## Setup
- let expectedConf = ClusterConf.TheWakuNetworkConf()
+ let expectedConf = NetworkConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
@@ -131,13 +169,15 @@ suite "Waku config - apply preset":
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
- check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
+ check conf.shardingConf.kind == expectedConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ expectedConf.shardingConf.numShardsInCluster
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
-suite "Waku config - node key":
+suite "Waku external config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =
@@ -158,13 +198,13 @@ suite "Waku config - node key":
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())
-suite "Waku config - Shards":
+suite "Waku external config - Shards":
test "Shards are valid":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 4]
- let numShardsInNetwork = 5.uint32
+ let numShardsInNetwork = 5.uint16
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
@@ -183,7 +223,7 @@ suite "Waku config - Shards":
## Given
let shards: seq[uint16] = @[0, 2, 5]
- let numShardsInNetwork = 5.uint32
+ let numShardsInNetwork = 5.uint16
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
@@ -198,7 +238,7 @@ suite "Waku config - Shards":
## Setup
## Given
- let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
+ let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"])
## When
let res = wakuNodeConf.toWakuConf()
@@ -207,3 +247,15 @@ suite "Waku config - Shards":
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error
+
+ test "Imvalid shard is passed without num shards":
+ ## Setup
+
+ ## Given
+ let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
+
+ ## When
+ let res = wakuNodeConf.toWakuConf()
+
+ ## Then
+ assert res.isErr(), "Invalid shard was accepted"
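
For orientation, a minimal sketch of the shard validation these tests exercise, reusing the helpers this module already imports (defaultWakuNodeConf, WakuNodeConf.load, toWakuConf); it is illustrative only, not part of the suite:

# Default auto-sharding yields a 1-shard cluster, so only shard 0 is accepted.
let okRes = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"]).toWakuConf()
assert okRes.isOk() and okRes.get().validate().isOk()

let badRes = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]).toWakuConf()
assert badRes.isErr(), "shard 32 is out of range for a 1-shard cluster"

# Selecting static sharding (num shards = 0) subscribes to no shards by default.
var staticPre = defaultWakuNodeConf().get()
staticPre.numShardsInNetwork = 0
assert staticPre.toWakuConf().get().subscribeShards.len == 0
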
diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim
index c18a2c73c..436eb4e40 100644
--- a/tests/factory/test_waku_conf.nim
+++ b/tests/factory/test_waku_conf.nim
@@ -16,7 +16,7 @@ import
suite "Waku Conf - build with cluster conf":
test "Cluster Conf is passed and relay is enabled":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.discv5Conf.withUdpPort(9000)
builder.withRelayServiceRatio("50:50")
@@ -25,7 +25,7 @@ suite "Waku Conf - build with cluster conf":
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
- builder.withClusterConf(clusterConf)
+ builder.withNetworkConf(networkConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
@@ -37,27 +37,29 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check conf.clusterId == clusterConf.clusterId
- check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
- check conf.shards == expectedShards
+ check conf.clusterId == networkConf.clusterId
+ check conf.shardingConf.kind == networkConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ networkConf.shardingConf.numShardsInCluster
+ check conf.subscribeShards == expectedShards
check conf.maxMessageSizeBytes ==
- uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
- check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
- if clusterConf.rlnRelay:
+ if networkConf.rlnRelay:
assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string ==
- clusterConf.rlnRelayEthContractAddress
- check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
- check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
- check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
- check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
+ networkConf.rlnRelayEthContractAddress
+ check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == networkConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
test "Cluster Conf is passed, but relay is disabled":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.withRelayServiceRatio("50:50")
builder.discv5Conf.withUdpPort(9000)
@@ -66,7 +68,7 @@ suite "Waku Conf - build with cluster conf":
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
- builder.withClusterConf(clusterConf)
+ builder.withNetworkConf(networkConf)
builder.withRelay(false)
## When
@@ -77,18 +79,20 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check conf.clusterId == clusterConf.clusterId
- check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
- check conf.shards == expectedShards
+ check conf.clusterId == networkConf.clusterId
+ check conf.shardingConf.kind == networkConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ networkConf.shardingConf.numShardsInCluster
+ check conf.subscribeShards == expectedShards
check conf.maxMessageSizeBytes ==
- uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
- check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed, but rln relay is disabled":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let # Mount all shards in network
@@ -96,7 +100,7 @@ suite "Waku Conf - build with cluster conf":
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
- builder.withClusterConf(clusterConf)
+ builder.withNetworkConf(networkConf)
builder.rlnRelayConf.withEnabled(false)
## When
@@ -107,24 +111,26 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check conf.clusterId == clusterConf.clusterId
- check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
- check conf.shards == expectedShards
+ check conf.clusterId == networkConf.clusterId
+ check conf.shardingConf.kind == networkConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ networkConf.shardingConf.numShardsInCluster
+ check conf.subscribeShards == expectedShards
check conf.maxMessageSizeBytes ==
- uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
- check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed and valid shards are specified":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 3.uint16]
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
- builder.withClusterConf(clusterConf)
- builder.withShards(shards)
+ builder.withNetworkConf(networkConf)
+ builder.withSubscribeShards(shards)
## When
let resConf = builder.build()
@@ -134,23 +140,25 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check conf.clusterId == clusterConf.clusterId
- check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
- check conf.shards == shards
+ check conf.clusterId == networkConf.clusterId
+ check conf.shardingConf.kind == networkConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ networkConf.shardingConf.numShardsInCluster
+ check conf.subscribeShards == shards
check conf.maxMessageSizeBytes ==
- uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
- check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
+ check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
test "Cluster Conf is passed and invalid shards are specified":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 10.uint16]
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
- builder.withClusterConf(clusterConf)
- builder.withShards(shards)
+ builder.withNetworkConf(networkConf)
+ builder.withSubscribeShards(shards)
## When
let resConf = builder.build()
@@ -160,7 +168,7 @@ suite "Waku Conf - build with cluster conf":
test "Cluster Conf is passed and RLN contract is **not** overridden":
## Setup
- let clusterConf = ClusterConf.TheWakuNetworkConf()
+ let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
@@ -170,7 +178,7 @@ suite "Waku Conf - build with cluster conf":
## Given
builder.rlnRelayConf.withEthContractAddress(contractAddress)
- builder.withClusterConf(clusterConf)
+ builder.withNetworkConf(networkConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test")
@@ -182,24 +190,26 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check conf.clusterId == clusterConf.clusterId
- check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
- check conf.shards == expectedShards
+ check conf.clusterId == networkConf.clusterId
+ check conf.shardingConf.kind == networkConf.shardingConf.kind
+ check conf.shardingConf.numShardsInCluster ==
+ networkConf.shardingConf.numShardsInCluster
+ check conf.subscribeShards == expectedShards
check conf.maxMessageSizeBytes ==
- uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
- check conf.discv5Conf.isSome == clusterConf.discv5Discovery
- check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
+ uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
+ check conf.discv5Conf.isSome == networkConf.discv5Discovery
+ check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
- if clusterConf.rlnRelay:
+ if networkConf.rlnRelay:
assert conf.rlnRelayConf.isSome
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string ==
- clusterConf.rlnRelayEthContractAddress
- check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
- check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
- check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
- check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
+ networkConf.rlnRelayEthContractAddress
+ check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
+ check rlnRelayConf.chainId == networkConf.rlnRelayChainId
+ check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
+ check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
suite "Waku Conf - node key":
test "Node key is generated":
@@ -264,8 +274,8 @@ suite "Waku Conf - extMultiaddrs":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
- check multiaddrs.len == conf.networkConf.extMultiAddrs.len
- let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
+ check multiaddrs.len == conf.endpointConf.extMultiAddrs.len
+ let resMultiaddrs = conf.endpointConf.extMultiAddrs.map(
proc(m: MultiAddress): string =
$m
)
diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim
index 9ef5ddd90..889e397cc 100644
--- a/tests/test_peer_manager.nim
+++ b/tests/test_peer_manager.nim
@@ -420,7 +420,7 @@ procSuite "Peer Manager":
parseIpAddress("0.0.0.0"),
port,
clusterId = 3,
- shards = @[uint16(0)],
+ subscribeShards = @[uint16(0)],
)
# same network
@@ -429,14 +429,14 @@ procSuite "Peer Manager":
parseIpAddress("0.0.0.0"),
port,
clusterId = 4,
- shards = @[uint16(0)],
+ subscribeShards = @[uint16(0)],
)
node3 = newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
port,
clusterId = 4,
- shards = @[uint16(0)],
+ subscribeShards = @[uint16(0)],
)
node1.mountMetadata(3).expect("Mounted Waku Metadata")
diff --git a/tests/test_waku_netconfig.nim b/tests/test_waku_netconfig.nim
index d2c9cc780..712fa4736 100644
--- a/tests/test_waku_netconfig.nim
+++ b/tests/test_waku_netconfig.nim
@@ -18,8 +18,8 @@ suite "Waku NetConfig":
let wakuFlags = defaultTestWakuFlags()
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extIp = none(IpAddress),
extPort = none(Port),
extMultiAddrs = @[],
@@ -46,7 +46,8 @@ suite "Waku NetConfig":
let conf = defaultTestWakuConf()
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
)
assert netConfigRes.isOk(), $netConfigRes.error
@@ -57,7 +58,9 @@ suite "Waku NetConfig":
netConfig.announcedAddresses.len == 1 # Only bind address should be present
netConfig.announcedAddresses[0] ==
formatListenAddress(
- ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort)
+ ip4TcpEndPoint(
+ conf.endpointConf.p2pListenAddress, conf.endpointConf.p2pTcpPort
+ )
)
asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":
@@ -67,8 +70,8 @@ suite "Waku NetConfig":
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@@ -88,8 +91,8 @@ suite "Waku NetConfig":
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@@ -110,8 +113,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
)
@@ -131,8 +134,8 @@ suite "Waku NetConfig":
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@@ -152,8 +155,8 @@ suite "Waku NetConfig":
wssEnabled = false
var netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@@ -165,8 +168,9 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
netConfig.announcedAddresses[1] == (
- ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) &
- wsFlag(wssEnabled)
+ ip4TcpEndPoint(
+ conf.endpointConf.p2pListenAddress, conf.webSocketConf.get().port
+ ) & wsFlag(wssEnabled)
)
## Now try the same for the case of wssEnabled = true
@@ -174,8 +178,8 @@ suite "Waku NetConfig":
wssEnabled = true
netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@@ -187,8 +191,9 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
netConfig.announcedAddresses[1] == (
- ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) &
- wsFlag(wssEnabled)
+ ip4TcpEndPoint(
+ conf.endpointConf.p2pListenAddress, conf.websocketConf.get().port
+ ) & wsFlag(wssEnabled)
)
asyncTest "Announced WebSocket address contains external IP if provided":
@@ -199,8 +204,8 @@ suite "Waku NetConfig":
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
wsEnabled = true,
@@ -224,8 +229,8 @@ suite "Waku NetConfig":
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
wsEnabled = true,
@@ -252,8 +257,8 @@ suite "Waku NetConfig":
wssEnabled = false
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@@ -277,7 +282,8 @@ suite "Waku NetConfig":
let conf = defaultTestWakuConf()
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
)
assert netConfigRes.isOk(), $netConfigRes.error
@@ -285,8 +291,8 @@ suite "Waku NetConfig":
let netConfig = netConfigRes.get()
check:
- netConfig.enrIp.get() == conf.networkConf.p2pListenAddress
- netConfig.enrPort.get() == conf.networkConf.p2pTcpPort
+ netConfig.enrIp.get() == conf.endpointConf.p2pListenAddress
+ netConfig.enrPort.get() == conf.endpointConf.p2pTcpPort
asyncTest "ENR is set with extIp/Port if provided":
let
@@ -295,8 +301,8 @@ suite "Waku NetConfig":
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@@ -316,8 +322,8 @@ suite "Waku NetConfig":
extPort = Port(1234)
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@@ -339,8 +345,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
var netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wsEnabled = wsEnabled,
)
@@ -358,8 +364,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wssEnabled = wssEnabled,
)
@@ -380,8 +386,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
let netConfigRes = NetConfig.init(
- bindIp = conf.networkConf.p2pListenAddress,
- bindPort = conf.networkConf.p2pTcpPort,
+ bindIp = conf.endpointConf.p2pListenAddress,
+ bindPort = conf.endpointConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
extMultiAddrsOnly = true,
)
diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim
index 54719aac1..fe040534e 100644
--- a/tests/testlib/wakunode.nim
+++ b/tests/testlib/wakunode.nim
@@ -37,7 +37,7 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
builder.withRelayServiceRatio("60:40")
builder.withMaxMessageSize("1024 KiB")
builder.withClusterId(DefaultClusterId)
- builder.withShards(@[DefaultShardId])
+ builder.withSubscribeShards(@[DefaultShardId])
builder.withRelay(true)
builder.withRendezvous(true)
builder.storeServiceConf.withDbMigration(false)
@@ -72,7 +72,7 @@ proc newTestWakuNode*(
agentString = none(string),
peerStoreCapacity = none(int),
clusterId = DefaultClusterId,
- shards = @[DefaultShardId],
+ subscribeShards = @[DefaultShardId],
): WakuNode =
var resolvedExtIp = extIp
@@ -86,7 +86,7 @@ proc newTestWakuNode*(
var conf = defaultTestWakuConf()
conf.clusterId = clusterId
- conf.shards = shards
+ conf.subscribeShards = subscribeShards
if dns4DomainName.isSome() and extIp.isNone():
# If there's an error resolving the IP, an exception is thrown and test fails
@@ -114,7 +114,7 @@ proc newTestWakuNode*(
var enrBuilder = EnrBuilder.init(nodeKey)
enrBuilder.withWakuRelaySharding(
- RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
+ RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
).isOkOr:
raise newException(Defect, "Invalid record: " & $error)
diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim
index c5dd1c55e..79913ce92 100644
--- a/tests/waku_discv5/test_waku_discv5.nim
+++ b/tests/waku_discv5/test_waku_discv5.nim
@@ -503,7 +503,7 @@ suite "Waku Discovery v5":
waku.dynamicBootstrapNodes,
waku.rng,
waku.conf.nodeKey,
- waku.conf.networkConf.p2pListenAddress,
+ waku.conf.endpointConf.p2pListenAddress,
waku.conf.portsShift,
)
@@ -534,7 +534,7 @@ suite "Waku Discovery v5":
waku.dynamicBootstrapNodes,
waku.rng,
waku.conf.nodeKey,
- waku.conf.networkConf.p2pListenAddress,
+ waku.conf.endpointConf.p2pListenAddress,
waku.conf.portsShift,
)
diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim
index 9b867c707..7bd44a311 100644
--- a/tests/waku_lightpush/lightpush_utils.nim
+++ b/tests/waku_lightpush/lightpush_utils.nim
@@ -18,8 +18,10 @@ proc newTestWakuLightpushNode*(
): Future[WakuLightPush] {.async.} =
let
peerManager = PeerManager.new(switch)
- wakuSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
- proto = WakuLightPush.new(peerManager, rng, handler, wakuSharding, rateLimitSetting)
+ wakuAutoSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
+ proto = WakuLightPush.new(
+ peerManager, rng, handler, some(wakuAutoSharding), rateLimitSetting
+ )
await proto.start()
switch.mount(proto)
diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim
index ad8d83361..2b4f32617 100644
--- a/tests/waku_relay/test_wakunode_relay.nim
+++ b/tests/waku_relay/test_wakunode_relay.nim
@@ -657,7 +657,7 @@ suite "WakuNode - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
- require node.mountSharding(1, 1).isOk
+ require node.mountAutoSharding(1, 1).isOk
## Given
let
@@ -670,11 +670,14 @@ suite "WakuNode - Relay":
): Future[void] {.gcsafe, raises: [Defect].} =
discard pubsubTopic
discard message
- assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"),
+ assert shard ==
+ node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic"),
"topic must use the same shard"
- assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"),
+ assert shard ==
+ node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic"),
"topic must use the same shard"
- assert shard == node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"),
+ assert shard ==
+ node.wakuAutoSharding.get().getShard(contentTopicC).expect("Valid Topic"),
"topic must use the same shard"
## When
diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim
index 4f52732da..2d62d4956 100644
--- a/tests/wakunode2/test_app.nim
+++ b/tests/wakunode2/test_app.nim
@@ -65,7 +65,7 @@ suite "Wakunode2 - Waku initialization":
test "app properly handles dynamic port configuration":
## Given
var conf = defaultTestWakuConf()
- conf.networkConf.p2pTcpPort = Port(0)
+ conf.endpointConf.p2pTcpPort = Port(0)
## When
var waku = Waku.new(conf).valueOr:
diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim
index 4e59b0725..c928140e1 100644
--- a/tests/wakunode_rest/test_rest_admin.nim
+++ b/tests/wakunode_rest/test_rest_admin.nim
@@ -1,7 +1,7 @@
{.used.}
import
- std/[sequtils, strformat, net],
+ std/[sequtils, net],
testutils/unittests,
presto,
presto/client as presto_client,
@@ -42,6 +42,14 @@ suite "Waku v2 Rest API - Admin":
node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60602))
node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604))
+ let clusterId = 1.uint16
+ node1.mountMetadata(clusterId).isOkOr:
+ assert false, "Failed to mount metadata: " & $error
+ node2.mountMetadata(clusterId).isOkOr:
+ assert false, "Failed to mount metadata: " & $error
+ node3.mountMetadata(clusterId).isOkOr:
+ assert false, "Failed to mount metadata: " & $error
+
await allFutures(node1.start(), node2.start(), node3.start())
await allFutures(
node1.mountRelay(),
@@ -56,7 +64,7 @@ suite "Waku v2 Rest API - Admin":
): Future[void] {.async, gcsafe.} =
await sleepAsync(0.milliseconds)
- let shard = RelayShard(clusterId: 1, shardId: 0)
+ let shard = RelayShard(clusterId: clusterId, shardId: 0)
node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim
index 8ea7f2abe..147f6e68f 100644
--- a/tests/wakunode_rest/test_rest_relay.nim
+++ b/tests/wakunode_rest/test_rest_relay.nim
@@ -296,7 +296,7 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
- require node.mountSharding(1, 8).isOk
+ require node.mountAutoSharding(1, 8).isOk
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -346,6 +346,7 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ require node.mountAutoSharding(1, 8).isOk
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -404,6 +405,7 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ require node.mountAutoSharding(1, 8).isOk
var restPort = Port(0)
let restAddress = parseIpAddress("0.0.0.0")
@@ -469,6 +471,8 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ require node.mountAutoSharding(1, 8).isOk
+
let wakuRlnConfig = WakuRlnConfig(
dynamic: false,
credIndex: some(1.uint),
@@ -528,6 +532,8 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ require node.mountAutoSharding(1, 8).isOk
+
let wakuRlnConfig = WakuRlnConfig(
dynamic: false,
credIndex: some(1.uint),
@@ -641,6 +647,8 @@ suite "Waku v2 Rest API - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
+ require node.mountAutoSharding(1, 8).isOk
+
let wakuRlnConfig = WakuRlnConfig(
dynamic: false,
credIndex: some(1.uint),
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index 0b9ca0d88..ee7ca1b8c 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -59,8 +59,9 @@ type WakuConfBuilder* = object
nodeKey: Option[crypto.PrivateKey]
clusterId: Option[uint16]
- numShardsInNetwork: Option[uint32]
- shards: Option[seq[uint16]]
+ shardingConf: Option[ShardingConfKind]
+ numShardsInCluster: Option[uint16]
+ subscribeShards: Option[seq[uint16]]
protectedShards: Option[seq[ProtectedShard]]
contentTopics: Option[seq[string]]
@@ -83,7 +84,7 @@ type WakuConfBuilder* = object
# TODO: move within a relayConf
rendezvous: Option[bool]
- clusterConf: Option[ClusterConf]
+ networkConf: Option[NetworkConf]
staticNodes: seq[string]
@@ -135,8 +136,8 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder =
webSocketConf: WebSocketConfBuilder.init(),
)
-proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) =
- b.clusterConf = some(clusterConf)
+proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
+ b.networkConf = some(networkConf)
proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
b.nodeKey = some(nodeKey)
@@ -144,11 +145,14 @@ proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
b.clusterId = some(clusterId)
-proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) =
- b.numShardsInNetwork = some(numShardsInNetwork)
+proc withShardingConf*(b: var WakuConfBuilder, shardingConf: ShardingConfKind) =
+ b.shardingConf = some(shardingConf)
-proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
- b.shards = some(shards)
+proc withNumShardsInCluster*(b: var WakuConfBuilder, numShardsInCluster: uint16) =
+ b.numShardsInCluster = some(numShardsInCluster)
+
+proc withSubscribeShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
+ b.subscribeShards = some(shards)
proc withProtectedShards*(
b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
@@ -269,6 +273,8 @@ proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
builder.staticNodes = concat(builder.staticNodes, staticNodes)
+## Building
+
proc nodeKey(
builder: WakuConfBuilder, rng: ref HmacDrbgContext
): Result[crypto.PrivateKey, string] =
@@ -281,77 +287,105 @@ proc nodeKey(
return err("Failed to generate key: " & $error)
return ok(nodeKey)
-proc applyClusterConf(builder: var WakuConfBuilder) =
- # Apply cluster conf, overrides most values passed individually
- # If you want to tweak values, don't use clusterConf
- if builder.clusterConf.isNone():
+proc buildShardingConf(
+ bShardingConfKind: Option[ShardingConfKind],
+ bNumShardsInCluster: Option[uint16],
+ bSubscribeShards: Option[seq[uint16]],
+): (ShardingConf, seq[uint16]) =
+ echo "bSubscribeShards: ", bSubscribeShards
+ case bShardingConfKind.get(AutoSharding)
+ of StaticSharding:
+ (ShardingConf(kind: StaticSharding), bSubscribeShards.get(@[]))
+ of AutoSharding:
+ let numShardsInCluster = bNumShardsInCluster.get(1)
+ let shardingConf =
+ ShardingConf(kind: AutoSharding, numShardsInCluster: numShardsInCluster)
+ let upperShard = uint16(numShardsInCluster - 1)
+ (shardingConf, bSubscribeShards.get(toSeq(0.uint16 .. upperShard)))
+
+proc applyNetworkConf(builder: var WakuConfBuilder) =
+ # Apply network conf, overrides most values passed individually
+ # If you want to tweak values, don't use networkConf
+ # TODO: networkconf should be one field of the conf builder so that this function becomes unnecessary
+ if builder.networkConf.isNone():
return
- let clusterConf = builder.clusterConf.get()
+ let networkConf = builder.networkConf.get()
if builder.clusterId.isSome():
- warn "Cluster id was provided alongside a cluster conf",
- used = clusterConf.clusterId, discarded = builder.clusterId.get()
- builder.clusterId = some(clusterConf.clusterId)
+ warn "Cluster id was provided alongside a network conf",
+ used = networkConf.clusterId, discarded = builder.clusterId.get()
+ builder.clusterId = some(networkConf.clusterId)
# Apply relay parameters
- if builder.relay.get(false) and clusterConf.rlnRelay:
+ if builder.relay.get(false) and networkConf.rlnRelay:
if builder.rlnRelayConf.enabled.isSome():
- warn "RLN Relay was provided alongside a cluster conf",
- used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
+ warn "RLN Relay was provided alongside a network conf",
+ used = networkConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
builder.rlnRelayConf.withEnabled(true)
if builder.rlnRelayConf.ethContractAddress.get("") != "":
- warn "RLN Relay ETH Contract Address was provided alongside a cluster conf",
- used = clusterConf.rlnRelayEthContractAddress.string,
+ warn "RLN Relay ETH Contract Address was provided alongside a network conf",
+ used = networkConf.rlnRelayEthContractAddress.string,
discarded = builder.rlnRelayConf.ethContractAddress.get().string
- builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress)
+ builder.rlnRelayConf.withEthContractAddress(networkConf.rlnRelayEthContractAddress)
if builder.rlnRelayConf.chainId.isSome():
- warn "RLN Relay Chain Id was provided alongside a cluster conf",
- used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
- builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId)
+ warn "RLN Relay Chain Id was provided alongside a network conf",
+ used = networkConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
+ builder.rlnRelayConf.withChainId(networkConf.rlnRelayChainId)
if builder.rlnRelayConf.dynamic.isSome():
- warn "RLN Relay Dynamic was provided alongside a cluster conf",
- used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
- builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic)
+ warn "RLN Relay Dynamic was provided alongside a network conf",
+ used = networkConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
+ builder.rlnRelayConf.withDynamic(networkConf.rlnRelayDynamic)
if builder.rlnRelayConf.epochSizeSec.isSome():
- warn "RLN Epoch Size in Seconds was provided alongside a cluster conf",
- used = clusterConf.rlnEpochSizeSec,
+ warn "RLN Epoch Size in Seconds was provided alongside a network conf",
+ used = networkConf.rlnEpochSizeSec,
discarded = builder.rlnRelayConf.epochSizeSec
- builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec)
+ builder.rlnRelayConf.withEpochSizeSec(networkConf.rlnEpochSizeSec)
if builder.rlnRelayConf.userMessageLimit.isSome():
- warn "RLN Relay Dynamic was provided alongside a cluster conf",
- used = clusterConf.rlnRelayUserMessageLimit,
+ warn "RLN Relay Dynamic was provided alongside a network conf",
+ used = networkConf.rlnRelayUserMessageLimit,
discarded = builder.rlnRelayConf.userMessageLimit
- builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit)
+ builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit)
# End Apply relay parameters
case builder.maxMessageSize.kind
of mmskNone:
discard
of mmskStr, mmskInt:
- warn "Max Message Size was provided alongside a cluster conf",
- used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize
- builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize))
+ warn "Max Message Size was provided alongside a network conf",
+ used = networkConf.maxMessageSize, discarded = $builder.maxMessageSize
+ builder.withMaxMessageSize(parseCorrectMsgSize(networkConf.maxMessageSize))
- if builder.numShardsInNetwork.isSome():
- warn "Num Shards In Network was provided alongside a cluster conf",
- used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork
- builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork)
+ if builder.shardingConf.isSome():
+ warn "Sharding Conf was provided alongside a network conf",
+ used = networkConf.shardingConf.kind, discarded = builder.shardingConf
- if clusterConf.discv5Discovery:
+ if builder.numShardsInCluster.isSome():
+ warn "Num Shards In Cluster was provided alongside a network conf",
+ used = networkConf.shardingConf.numShardsInCluster,
+ discarded = builder.numShardsInCluster
+
+ case networkConf.shardingConf.kind
+ of StaticSharding:
+ builder.shardingConf = some(StaticSharding)
+ of AutoSharding:
+ builder.shardingConf = some(AutoSharding)
+ builder.numShardsInCluster = some(networkConf.shardingConf.numShardsInCluster)
+
+ if networkConf.discv5Discovery:
if builder.discv5Conf.enabled.isNone:
- builder.discv5Conf.withEnabled(clusterConf.discv5Discovery)
+ builder.discv5Conf.withEnabled(networkConf.discv5Discovery)
if builder.discv5Conf.bootstrapNodes.len == 0 and
- clusterConf.discv5BootstrapNodes.len > 0:
- warn "Discv5 Boostrap nodes were provided alongside a cluster conf",
- used = clusterConf.discv5BootstrapNodes,
+ networkConf.discv5BootstrapNodes.len > 0:
+ warn "Discv5 Bootstrap nodes were provided alongside a network conf",
+ used = networkConf.discv5BootstrapNodes,
discarded = builder.discv5Conf.bootstrapNodes
- builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes)
+ builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes)
proc build*(
builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng()
@@ -361,7 +395,7 @@ proc build*(
## of libwaku. It aims to be agnostic so it does not apply a
## default when it is opinionated.
- applyClusterConf(builder)
+ applyNetworkConf(builder)
let relay =
if builder.relay.isSome():
@@ -411,24 +445,14 @@ proc build*(
else:
builder.clusterId.get().uint16
- let numShardsInNetwork =
- if builder.numShardsInNetwork.isSome():
- builder.numShardsInNetwork.get()
- else:
- warn "Number of shards in network not specified, defaulting to zero (improve is wip)"
- 0
-
- let shards =
- if builder.shards.isSome():
- builder.shards.get()
- else:
- warn "shards not specified, defaulting to all shards in network"
- # TODO: conversion should not be needed
- let upperShard: uint16 = uint16(numShardsInNetwork - 1)
- toSeq(0.uint16 .. upperShard)
-
+ let (shardingConf, subscribeShards) = buildShardingConf(
+ builder.shardingConf, builder.numShardsInCluster, builder.subscribeShards
+ )
let protectedShards = builder.protectedShards.get(@[])
+ info "Sharding configuration: ",
+ shardingConf = $shardingConf, subscribeShards = $subscribeShards
+
let maxMessageSizeBytes =
case builder.maxMessageSize.kind
of mmskInt:
@@ -584,9 +608,9 @@ proc build*(
# end confs
nodeKey: nodeKey,
clusterId: clusterId,
- numShardsInNetwork: numShardsInNetwork,
+ shardingConf: shardingConf,
contentTopics: contentTopics,
- shards: shards,
+ subscribeShards: subscribeShards,
protectedShards: protectedShards,
relay: relay,
lightPush: lightPush,
@@ -601,7 +625,7 @@ proc build*(
logLevel: logLevel,
logFormat: logFormat,
# TODO: Separate builders
- networkConf: NetworkConfig(
+ endpointConf: EndpointConf(
natStrategy: natStrategy,
p2pTcpPort: p2pTcpPort,
dns4DomainName: dns4DomainName,
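
A short sketch of how buildShardingConf (introduced above) resolves its defaults; the proc is not exported, so this only compiles inside waku_conf_builder.nim and is shown purely for illustration:

# No sharding options set: fall back to auto-sharding with a single shard.
let (autoConf, autoShards) =
  buildShardingConf(none(ShardingConfKind), none(uint16), none(seq[uint16]))
assert autoConf.kind == AutoSharding
assert autoConf.numShardsInCluster == 1
assert autoShards == @[0.uint16]

# Static sharding with no explicit shards: subscribe to nothing.
let (staticConf, staticShards) =
  buildShardingConf(some(StaticSharding), none(uint16), none(seq[uint16]))
assert staticConf.kind == StaticSharding
assert staticShards.len == 0
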
diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim
index 5ed3d230a..88edc0941 100644
--- a/waku/factory/conf_builder/web_socket_conf_builder.nim
+++ b/waku/factory/conf_builder/web_socket_conf_builder.nim
@@ -1,5 +1,5 @@
import chronicles, std/[net, options], results
-import ../networks_config
+import waku/factory/waku_conf
logScope:
topics = "waku conf builder websocket"
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 704c6d4e5..2d7205e87 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -314,28 +314,16 @@ hence would have reachability issues.""",
name: "staticnode"
.}: seq[string]
- # TODO: This is trying to do too much, this should only be used for autosharding, which itself should be configurable
- # If numShardsInNetwork is not set, we use the number of shards configured as numShardsInNetwork
numShardsInNetwork* {.
- desc: "Number of shards in the network",
- defaultValue: 0,
+ desc:
+ "Enables autosharding and set number of shards in the cluster, set to `0` to use static sharding",
+ defaultValue: 1,
name: "num-shards-in-network"
- .}: uint32
+ .}: uint16
shards* {.
desc:
- "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
- defaultValue:
- @[
- uint16(0),
- uint16(1),
- uint16(2),
- uint16(3),
- uint16(4),
- uint16(5),
- uint16(6),
- uint16(7),
- ],
+ "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated. Subscribes to all shards by default in auto-sharding, no shard for static sharding",
name: "shard"
.}: seq[uint16]
@@ -858,9 +846,9 @@ proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf =
return InspectRlnDbConf(treePath: n.treePath)
-proc toClusterConf(
+proc toNetworkConf(
preset: string, clusterId: Option[uint16]
-): ConfResult[Option[ClusterConf]] =
+): ConfResult[Option[NetworkConf]] =
var lcPreset = toLowerAscii(preset)
if clusterId.isSome() and clusterId.get() == 1:
warn(
@@ -870,9 +858,9 @@ proc toClusterConf(
case lcPreset
of "":
- ok(none(ClusterConf))
+ ok(none(NetworkConf))
of "twn":
- ok(some(ClusterConf.TheWakuNetworkConf()))
+ ok(some(NetworkConf.TheWakuNetworkConf()))
else:
err("Invalid --preset value passed: " & lcPreset)
@@ -909,11 +897,11 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withProtectedShards(n.protectedShards)
b.withClusterId(n.clusterId)
- let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr:
+ let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr:
return err("Error determining cluster from preset: " & $error)
- if clusterConf.isSome():
- b.withClusterConf(clusterConf.get())
+ if networkConf.isSome():
+ b.withNetworkConf(networkConf.get())
b.withAgentString(n.agentString)
@@ -948,9 +936,16 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withStaticNodes(n.staticNodes)
if n.numShardsInNetwork != 0:
- b.withNumShardsInNetwork(n.numShardsInNetwork)
+ b.withNumShardsInCluster(n.numShardsInNetwork)
+ b.withShardingConf(AutoSharding)
+ else:
+ b.withShardingConf(StaticSharding)
+
+ # It is not possible to pass an empty sequence on the CLI
+ # If this is empty, it means the user did not specify any shards
+ if n.shards.len != 0:
+ b.withSubscribeShards(n.shards)
- b.withShards(n.shards)
b.withContentTopics(n.contentTopics)
b.storeServiceConf.withEnabled(n.store)
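
To make the CLI-to-config mapping above concrete, a worked example (flag spellings are from this file; the resulting values follow from buildShardingConf's defaults and are given as an illustration, not additional code):

#   --num-shards-in-network=8 --shard=2 --shard=3
#     -> ShardingConf(kind: AutoSharding, numShardsInCluster: 8), subscribeShards = @[2, 3]
#   --num-shards-in-network=0 --shard=5
#     -> ShardingConf(kind: StaticSharding), subscribeShards = @[5]
#   no sharding flags at all
#     -> ShardingConf(kind: AutoSharding, numShardsInCluster: 1), subscribeShards = @[0]
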
diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim
index 4f252fd00..9fc3602a0 100644
--- a/waku/factory/internal_config.nim
+++ b/waku/factory/internal_config.nim
@@ -6,13 +6,7 @@ import
libp2p/nameresolving/dnsresolver,
std/[options, sequtils, net],
results
-import
- ../common/utils/nat,
- ../node/net_config,
- ../waku_enr,
- ../waku_core,
- ./waku_conf,
- ./networks_config
+import ../common/utils/nat, ../node/net_config, ../waku_enr, ../waku_core, ./waku_conf
proc enrConfiguration*(
conf: WakuConf, netConfig: NetConfig
@@ -29,7 +23,7 @@ proc enrConfiguration*(
enrBuilder.withMultiaddrs(netConfig.enrMultiaddrs)
enrBuilder.withWakuRelaySharding(
- RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
+ RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
).isOkOr:
return err("could not initialize ENR with shards")
@@ -64,7 +58,7 @@ proc dnsResolve*(
# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init
proc networkConfiguration*(
clusterId: uint16,
- conf: NetworkConfig,
+ conf: EndpointConf,
discv5Conf: Option[Discv5Conf],
webSocketConf: Option[WebSocketConf],
wakuFlags: CapabilitiesBitfield,
@@ -143,11 +137,3 @@ proc networkConfiguration*(
)
return netConfigRes
-
-# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
-proc getNumShardsInNetwork*(conf: WakuConf): uint32 =
- if conf.numShardsInNetwork != 0:
- return conf.numShardsInNetwork
- # If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec
- # https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
- return uint32(MaxShardIndex + 1)
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index 9d1da0ace..c7193aa9c 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -1,18 +1,23 @@
{.push raises: [].}
-import stint, std/[nativesockets, options]
+import chronicles, results, stint
-type WebSocketSecureConf* {.requiresInit.} = object
- keyPath*: string
- certPath*: string
+logScope:
+ topics = "waku networks conf"
-type WebSocketConf* = object
- port*: Port
- secureConf*: Option[WebSocketSecureConf]
+type
+ ShardingConfKind* = enum
+ AutoSharding
+ StaticSharding
-# TODO: Rename this type to match file name
+ ShardingConf* = object
+ case kind*: ShardingConfKind
+ of AutoSharding:
+ numShardsInCluster*: uint16
+ of StaticSharding:
+ discard
-type ClusterConf* = object
+type NetworkConf* = object
maxMessageSize*: string # TODO: static convert to a uint64
clusterId*: uint16
rlnRelay*: bool
@@ -21,17 +26,16 @@ type ClusterConf* = object
rlnRelayDynamic*: bool
rlnEpochSizeSec*: uint64
rlnRelayUserMessageLimit*: uint64
- # TODO: should be uint16 like the `shards` parameter
- numShardsInNetwork*: uint32
+ shardingConf*: ShardingConf
discv5Discovery*: bool
discv5BootstrapNodes*: seq[string]
# cluster-id=1 (aka The Waku Network)
# Cluster configuration corresponding to The Waku Network. Note that it
# overrides existing cli configuration
-proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
+proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
const RelayChainId = 59141'u256
- return ClusterConf(
+ return NetworkConf(
maxMessageSize: "150KiB",
clusterId: 1,
rlnRelay: true,
@@ -40,7 +44,7 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
rlnRelayChainId: RelayChainId,
rlnEpochSizeSec: 600,
rlnRelayUserMessageLimit: 100,
- numShardsInNetwork: 8,
+ shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8),
discv5Discovery: true,
discv5BootstrapNodes:
@[
@@ -49,3 +53,21 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
"enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
],
)
+
+proc validateShards*(
+ shardingConf: ShardingConf, shards: seq[uint16]
+): Result[void, string] =
+ case shardingConf.kind
+ of StaticSharding:
+ return ok()
+ of AutoSharding:
+ let numShardsInCluster = shardingConf.numShardsInCluster
+ for shard in shards:
+ if shard >= numShardsInCluster:
+ let msg =
+ "validateShards invalid shard: " & $shard & " when numShardsInCluster: " &
+ $numShardsInCluster
+ error "validateShards failed", error = msg
+ return err(msg)
+
+ return ok()
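
A minimal usage sketch of the new ShardingConf variant and validateShards defined above (assumes `import results` plus this module; illustrative only):

let autoConf = ShardingConf(kind: AutoSharding, numShardsInCluster: 8)
assert autoConf.validateShards(@[0.uint16, 7.uint16]).isOk()
assert autoConf.validateShards(@[8.uint16]).isErr() # 8 is out of range for 8 shards

let staticConf = ShardingConf(kind: StaticSharding)
assert staticConf.validateShards(@[1023.uint16]).isOk() # no upper bound is enforced
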
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 5298fa2b9..5e038ee0d 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -10,6 +10,7 @@ import
import
./internal_config,
+ ./networks_config,
./waku_conf,
./builder,
./validator_signed,
@@ -137,10 +138,12 @@ proc initNode(
proc getAutoshards*(
node: WakuNode, contentTopics: seq[string]
): Result[seq[RelayShard], string] =
+ if node.wakuAutoSharding.isNone():
+ return err("Static sharding used, cannot get shards from content topics")
var autoShards: seq[RelayShard]
for contentTopic in contentTopics:
- let shard = node.wakuSharding.getShard(contentTopic).valueOr:
- return err("Could not parse content topic: " & error)
+ let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
+ return err("Could not parse content topic: " & error)
autoShards.add(shard)
return ok(autoshards)
@@ -258,16 +261,11 @@ proc setupProtocols(
if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
node.setupStoreResume()
- # If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork
- let numShardsInNetwork = getNumShardsInNetwork(conf)
-
- if conf.numShardsInNetwork == 0:
- warn "Number of shards in network not configured, setting it to",
- # TODO: If not configured, it mounts 1024 shards! Make it a mandatory configuration instead
- numShardsInNetwork = $numShardsInNetwork
-
- node.mountSharding(conf.clusterId, numShardsInNetwork).isOkOr:
- return err("failed to mount waku sharding: " & error)
+ if conf.shardingConf.kind == AutoSharding:
+ node.mountAutoSharding(conf.clusterId, conf.shardingConf.numShardsInCluster).isOkOr:
+ return err("failed to mount waku auto sharding: " & error)
+ else:
+ warn("Auto sharding is disabled")
# Mount relay on all nodes
var peerExchangeHandler = none(RoutingRecordsHandler)
@@ -290,14 +288,22 @@ proc setupProtocols(
peerExchangeHandler = some(handlePeerExchange)
- let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
- return err("Could not get autoshards: " & error)
+ # TODO: when using autosharding, the user should not be expected to pass any shards, but only content topics
+ # Hence, this joint logic should be removed in favour of an either/or choice:
+ # use passed shards (static) or deduce shards from content topics (auto)
+ let autoShards =
+ if node.wakuAutoSharding.isSome():
+ node.getAutoshards(conf.contentTopics).valueOr:
+ return err("Could not get autoshards: " & error)
+ else:
+ @[]
debug "Shards created from content topics",
contentTopics = conf.contentTopics, shards = autoShards
- let confShards =
- conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
+ let confShards = conf.subscribeShards.mapIt(
+ RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
+ )
let shards = confShards & autoShards
if conf.relay:
@@ -313,7 +319,7 @@ proc setupProtocols(
# Add validation keys to protected topics
var subscribedProtectedShards: seq[ProtectedShard]
for shardKey in conf.protectedShards:
- if shardKey.shard notin conf.shards:
+ if shardKey.shard notin conf.subscribeShards:
warn "protected shard not in subscribed shards, skipping adding validator",
protectedShard = shardKey.shard, subscribedShards = shards
continue
@@ -472,7 +478,7 @@ proc setupNode*(
wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
): Result[WakuNode, string] =
let netConfig = networkConfiguration(
- wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf,
+ wakuConf.clusterId, wakuConf.endpointConf, wakuConf.discv5Conf,
wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
wakuConf.portsShift, clientId,
).valueOr:
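
A hedged sketch of the resulting node-level behaviour (assumes an already initialised `node: WakuNode`; the content topic is only an example): getAutoshards works once mountAutoSharding has been called, and errors otherwise instead of guessing a shard.

assert node.mountAutoSharding(clusterId = 1, shardCount = 8).isOk()
let derived = node.getAutoshards(@["/waku/2/default-content/proto"])
assert derived.isOk() and derived.get().len == 1 # one shard per content topic

# Without mountAutoSharding, node.wakuAutoSharding stays none() and getAutoshards
# returns err("Static sharding used, cannot get shards from content topics").
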
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index faca627a4..d733c6bf5 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -130,8 +130,9 @@ proc setupAppCallbacks(
let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
return err("Could not get autoshards: " & error)
- let confShards =
- conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
+ let confShards = conf.subscribeShards.mapIt(
+ RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
+ )
let shards = confShards & autoShards
let uniqueShards = deduplicate(shards)
@@ -249,14 +250,14 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
return err("Could not retrieve ports: " & error)
if tcpPort.isSome():
- conf.networkConf.p2pTcpPort = tcpPort.get()
+ conf.endpointConf.p2pTcpPort = tcpPort.get()
if websocketPort.isSome() and conf.webSocketConf.isSome():
conf.webSocketConf.get().port = websocketPort.get()
# Rebuild NetConfig with bound port values
let netConf = networkConfiguration(
- conf.clusterId, conf.networkConf, conf.discv5Conf, conf.webSocketConf,
+ conf.clusterId, conf.endpointConf, conf.discv5Conf, conf.webSocketConf,
conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
).valueOr:
return err("Could not update NetConfig: " & error)
@@ -306,7 +307,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
proc updateWaku(waku: ptr Waku): Result[void, string] =
let conf = waku[].conf
- if conf.networkConf.p2pTcpPort == Port(0) or
+ if conf.endpointConf.p2pTcpPort == Port(0) or
(conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)):
updateEnr(waku).isOkOr:
return err("error calling updateEnr: " & $error)
@@ -389,7 +390,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
waku.dynamicBootstrapNodes,
waku.rng,
conf.nodeKey,
- conf.networkConf.p2pListenAddress,
+ conf.endpointConf.p2pListenAddress,
conf.portsShift,
)
@@ -413,7 +414,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
conf.relay,
conf.lightPush,
conf.clusterId,
- conf.shards,
+ conf.subscribeShards,
conf.contentTopics,
).isOkOr:
return err ("Starting protocols support REST server failed: " & $error)
diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim
index 584c60f33..6ffda1c14 100644
--- a/waku/factory/waku_conf.nim
+++ b/waku/factory/waku_conf.nim
@@ -20,6 +20,14 @@ export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerCon
logScope:
topics = "waku conf"
+type WebSocketSecureConf* {.requiresInit.} = object
+ keyPath*: string
+ certPath*: string
+
+type WebSocketConf* = object
+ port*: Port
+ secureConf*: Option[WebSocketSecureConf]
+
# TODO: should be defined in validator_signed.nim and imported here
type ProtectedShard* {.requiresInit.} = object
shard*: uint16
@@ -50,7 +58,7 @@ type FilterServiceConf* {.requiresInit.} = object
subscriptionTimeout*: uint16
maxCriteria*: uint32
-type NetworkConfig* = object # TODO: make enum
+type EndpointConf* = object # TODO: make enum
natStrategy*: string
p2pTcpPort*: Port
dns4DomainName*: Option[string]
@@ -68,11 +76,10 @@ type WakuConf* {.requiresInit.} = ref object
nodeKey*: crypto.PrivateKey
clusterId*: uint16
- shards*: seq[uint16]
+ subscribeShards*: seq[uint16]
protectedShards*: seq[ProtectedShard]
- # TODO: move to an autoShardingConf
- numShardsInNetwork*: uint32
+ shardingConf*: ShardingConf
contentTopics*: seq[string]
relay*: bool
@@ -95,7 +102,7 @@ type WakuConf* {.requiresInit.} = ref object
portsShift*: uint16
dnsAddrsNameServers*: seq[IpAddress]
- networkConf*: NetworkConfig
+ endpointConf*: EndpointConf
wakuFlags*: CapabilitiesBitfield
# TODO: could probably make it a `PeerRemoteInfo`
@@ -142,8 +149,8 @@ proc logConf*(conf: WakuConf) =
info "Configuration. Network", cluster = conf.clusterId
- for shard in conf.shards:
- info "Configuration. Shards", shard = shard
+ for shard in conf.subscribeShards:
+ info "Configuration. Active Relay Shards", shard = shard
if conf.discv5Conf.isSome():
for i in conf.discv5Conf.get().bootstrapNodes:
@@ -165,26 +172,9 @@ proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =
return err("nodekey param is invalid")
return ok()
-proc validateShards(wakuConf: WakuConf): Result[void, string] =
- let numShardsInNetwork = wakuConf.numShardsInNetwork
-
- # TODO: fix up this behaviour
- if numShardsInNetwork == 0:
- return ok()
-
- for shard in wakuConf.shards:
- if shard >= numShardsInNetwork:
- let msg =
- "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
- $numShardsInNetwork # fmt doesn't work
- error "validateShards failed", error = msg
- return err(msg)
-
- return ok()
-
proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
- if wakuConf.networkConf.dns4DomainName.isSome() and
- isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string):
+ if wakuConf.endpointConf.dns4DomainName.isSome() and
+ isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get().string):
return err("dns4-domain-name is an empty string, set it to none(string) instead")
if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):
@@ -236,6 +226,6 @@ proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
proc validate*(wakuConf: WakuConf): Result[void, string] =
?wakuConf.validateNodeKey()
- ?wakuConf.validateShards()
+ ?wakuConf.shardingConf.validateShards(wakuConf.subscribeShards)
?wakuConf.validateNoEmptyStrings()
return ok()
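
The shard validation that used to live in waku_conf.nim is now delegated to the sharding config via `shardingConf.validateShards(subscribeShards)`. Below is a minimal standalone sketch of that check; `ShardingConf` and its `numShardsInCluster` field are stand-ins here, since the real type is defined elsewhere and not shown in this hunk.

import results

type ShardingConf = object
  numShardsInCluster: uint32 # hypothetical field name, stand-in only

proc validateShards(conf: ShardingConf, subscribeShards: seq[uint16]): Result[void, string] =
  for shard in subscribeShards:
    if uint32(shard) >= conf.numShardsInCluster:
      return err(
        "invalid shard: " & $shard & " when numShardsInCluster: " & $conf.numShardsInCluster
      )
  return ok()

assert ShardingConf(numShardsInCluster: 8).validateShards(@[0'u16, 7'u16]).isOk()
assert ShardingConf(numShardsInCluster: 8).validateShards(@[8'u16]).isErr()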
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 6a5c3fdb0..ccd62664f 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -112,7 +112,7 @@ type
wakuLightpushClient*: WakuLightPushClient
wakuPeerExchange*: WakuPeerExchange
wakuMetadata*: WakuMetadata
- wakuSharding*: Sharding
+ wakuAutoSharding*: Option[Sharding]
enr*: enr.Record
libp2pPing*: Ping
rng*: ref rand.HmacDrbgContext
@@ -198,12 +198,13 @@ proc mountMetadata*(node: WakuNode, clusterId: uint32): Result[void, string] =
return ok()
-## Waku Sharding
-proc mountSharding*(
+## Waku AutoSharding
+proc mountAutoSharding*(
node: WakuNode, clusterId: uint16, shardCount: uint32
): Result[void, string] =
- info "mounting sharding", clusterId = clusterId, shardCount = shardCount
- node.wakuSharding = Sharding(clusterId: clusterId, shardCountGenZero: shardCount)
+ info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount
+ node.wakuAutoSharding =
+ some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount))
return ok()
## Waku Sync
@@ -322,11 +323,15 @@ proc subscribe*(
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentSub:
- let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
- error "Autosharding error", error = error
- return err("Autosharding error: " & error)
-
- ($shard, some(subscription.topic))
+ if node.wakuAutoSharding.isSome():
+ let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
+ error "Autosharding error", error = error
+ return err("Autosharding error: " & error)
+ ($shard, some(subscription.topic))
+ else:
+ return err(
+ "Static sharding is used, relay subscriptions must specify a pubsub topic"
+ )
of PubsubSub:
(subscription.topic, none(ContentTopic))
else:
@@ -353,11 +358,15 @@ proc unsubscribe*(
let (pubsubTopic, contentTopicOp) =
case subscription.kind
of ContentUnsub:
- let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
- error "Autosharding error", error = error
- return err("Autosharding error: " & error)
-
- ($shard, some(subscription.topic))
+ if node.wakuAutoSharding.isSome():
+ let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
+ error "Autosharding error", error = error
+ return err("Autosharding error: " & error)
+ ($shard, some(subscription.topic))
+ else:
+ return err(
+ "Static sharding is used, relay subscriptions must specify a pubsub topic"
+ )
of PubsubUnsub:
(subscription.topic, none(ContentTopic))
else:
@@ -388,9 +397,10 @@ proc publish*(
return err(msg)
let pubsubTopic = pubsubTopicOp.valueOr:
- node.wakuSharding.getShard(message.contentTopic).valueOr:
+ if node.wakuAutoSharding.isNone():
+ return err("Pubsub topic must be specified when static sharding is enabled.")
+ node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr:
let msg = "Autosharding error: " & error
- error "publish error", err = msg
return err(msg)
#TODO instead of discard return error when 0 peers received the message
@@ -564,8 +574,14 @@ proc filterSubscribe*(
waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
return subRes
+ elif node.wakuAutoSharding.isNone():
+ error "Failed filter subscription, pubsub topic must be specified with static sharding"
+ waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
else:
- let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
+ # No pubsub topic, autosharding is used to deduce it
+ # but content topics must be well-formed for this
+ let topicMapRes =
+ node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)
let topicMap =
if topicMapRes.isErr():
@@ -575,11 +591,11 @@ proc filterSubscribe*(
topicMapRes.get()
var futures = collect(newSeq):
- for pubsub, topics in topicMap.pairs:
+ for shard, topics in topicMap.pairs:
info "registering filter subscription to content",
- pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
+ shard = shard, contentTopics = topics, peer = remotePeer.peerId
let content = topics.mapIt($it)
- node.wakuFilterClient.subscribe(remotePeer, $pubsub, content)
+ node.wakuFilterClient.subscribe(remotePeer, $shard, content)
var subRes: FilterSubscribeResult = FilterSubscribeResult.ok()
try:
@@ -643,8 +659,12 @@ proc filterUnsubscribe*(
waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
return unsubRes
+ elif node.wakuAutoSharding.isNone():
+ error "Failed filter un-subscription, pubsub topic must be specified with static sharding"
+ waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
else: # pubsubTopic.isNone
- let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
+ let topicMapRes =
+ node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)
let topicMap =
if topicMapRes.isErr():
@@ -654,11 +674,11 @@ proc filterUnsubscribe*(
topicMapRes.get()
var futures = collect(newSeq):
- for pubsub, topics in topicMap.pairs:
+ for shard, topics in topicMap.pairs:
info "deregistering filter subscription to content",
- pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
+ shard = shard, contentTopics = topics, peer = remotePeer.peerId
let content = topics.mapIt($it)
- node.wakuFilterClient.unsubscribe(remotePeer, $pubsub, content)
+ node.wakuFilterClient.unsubscribe(remotePeer, $shard, content)
var unsubRes: FilterSubscribeResult = FilterSubscribeResult.ok()
try:
@@ -1064,7 +1084,10 @@ proc legacyLightpushPublish*(
if pubsubTopic.isSome():
return await internalPublish(node, pubsubTopic.get(), message, peer)
- let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, message.contentTopic)
+ if node.wakuAutoSharding.isNone():
+ return err("Pubsub topic must be specified when static sharding is enabled")
+ let topicMapRes =
+ node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic)
let topicMap =
if topicMapRes.isErr():
@@ -1120,7 +1143,7 @@ proc mountLightPush*(
lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
node.wakuLightPush = WakuLightPush.new(
- node.peerManager, node.rng, pushHandler, node.wakuSharding, some(rateLimit)
+ node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit)
)
if node.started:
@@ -1181,12 +1204,17 @@ proc lightpushPublish*(
return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
let pubsubForPublish = pubSubTopic.valueOr:
+ if node.wakuAutoSharding.isNone():
+ let msg = "Pubsub topic must be specified when static sharding is enabled"
+ error "lightpush publish error", error = msg
+ return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
+
let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
- node.wakuSharding.getShard(parsedTopic).valueOr:
+ node.wakuAutoSharding.get().getShard(parsedTopic).valueOr:
let msg = "Autosharding error: " & error
error "lightpush publish error", error = msg
return lighpushErrorResult(INTERNAL_SERVER_ERROR, msg)
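
With `wakuAutoSharding` now an `Option[Sharding]`, every call site that used to deduce a pubsub topic from a content topic first checks whether autosharding is mounted; under static sharding the caller must supply the pubsub topic explicitly. A standalone sketch of that gating follows, with `Sharding` and `deduceShard` as simplified stand-ins rather than the nwaku API.

import std/options
import results

type Sharding = object
  clusterId: uint16
  shardCountGenZero: uint32

proc deduceShard(s: Sharding, contentTopic: string): string =
  # stand-in for Sharding.getShard(): the real code hashes the content topic
  "/waku/2/rs/" & $s.clusterId & "/0"

proc resolvePubsubTopic(
    autoSharding: Option[Sharding], pubsubTopic: Option[string], contentTopic: string
): Result[string, string] =
  if pubsubTopic.isSome():
    return ok(pubsubTopic.get())
  if autoSharding.isNone():
    return err("Pubsub topic must be specified when static sharding is enabled")
  ok(autoSharding.get().deduceShard(contentTopic))

# static sharding: the topic is mandatory
assert resolvePubsubTopic(none(Sharding), none(string), "/app/1/chat/proto").isErr()
# autosharding mounted: the topic can be deduced
assert resolvePubsubTopic(
  some(Sharding(clusterId: 1, shardCountGenZero: 8)), none(string), "/app/1/chat/proto"
).isOk()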
diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim
index 6bf44e8a2..04cc31010 100644
--- a/waku/waku_api/rest/admin/handlers.nim
+++ b/waku/waku_api/rest/admin/handlers.nim
@@ -241,13 +241,20 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
let shard = shardId.valueOr:
return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))
+ if node.wakuMetadata.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Metadata Protocol is not mounted to the node"
+ )
+
if node.wakuRelay.isNil():
return RestApiResponse.serviceUnavailable(
"Error: Relay Protocol is not mounted to the node"
)
- let topic =
- toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
+ # TODO: clusterId and shards should be uint16 across all codebase and probably be defined as a type
+ let topic = toPubsubTopic(
+ RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard)
+ )
let pubsubPeers =
node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
let relayPeer = PeersOfShard(
@@ -284,13 +291,19 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
let shard = shardId.valueOr:
return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))
+ if node.wakuMetadata.isNil():
+ return RestApiResponse.serviceUnavailable(
+ "Error: Metadata Protocol is not mounted to the node"
+ )
+
if node.wakuRelay.isNil():
return RestApiResponse.serviceUnavailable(
"Error: Relay Protocol is not mounted to the node"
)
- let topic =
- toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
+ let topic = toPubsubTopic(
+ RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard)
+ )
let peers =
node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
let relayPeer = PeersOfShard(
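
The admin handlers now derive the pubsub topic from the metadata protocol's cluster id instead of the removed `wakuSharding` field. A minimal stand-in for `RelayShard`/`toPubsubTopic` illustrating the static-sharding topic format they produce:

type RelayShard = object
  clusterId: uint16
  shardId: uint16

proc toPubsubTopic(shard: RelayShard): string =
  "/waku/2/rs/" & $shard.clusterId & "/" & $shard.shardId

assert toPubsubTopic(RelayShard(clusterId: 1, shardId: 3)) == "/waku/2/rs/1/3"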
diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim
index 6725aaeec..eb514439f 100644
--- a/waku/waku_api/rest/builder.nim
+++ b/waku/waku_api/rest/builder.nim
@@ -151,17 +151,19 @@ proc startRestServerProtocolSupport*(
error "Could not subscribe", pubsubTopic, error
continue
- for contentTopic in contentTopics:
- cache.contentSubscribe(contentTopic)
+ if node.wakuAutoSharding.isSome():
+ # Only deduce pubsub topics to subscribe to from content topics if autosharding is enabled
+ for contentTopic in contentTopics:
+ cache.contentSubscribe(contentTopic)
- let shard = node.wakuSharding.getShard(contentTopic).valueOr:
- error "Autosharding error in REST", error = error
- continue
- let pubsubTopic = $shard
+ let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
+ error "Autosharding error in REST", error = error
+ continue
+ let pubsubTopic = $shard
- node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
- error "Could not subscribe", pubsubTopic, error
- continue
+ node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
+ error "Could not subscribe", pubsubTopic, error
+ continue
installRelayApiHandlers(router, node, cache)
else:
diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim
index 06bbc0c06..c268870d7 100644
--- a/waku/waku_api/rest/relay/handlers.nim
+++ b/waku/waku_api/rest/relay/handlers.nim
@@ -272,11 +272,16 @@ proc installRelayApiHandlers*(
var message: WakuMessage = req.toWakuMessage(version = 0).valueOr:
return RestApiResponse.badRequest()
- let pubsubTopic = node.wakuSharding.getShard(message.contentTopic).valueOr:
- let msg = "Autosharding error: " & error
+ if node.wakuAutoSharding.isNone():
+ let msg = "Autosharding is disabled"
error "publish error", err = msg
return RestApiResponse.badRequest("Failed to publish. " & msg)
+ let pubsubTopic = node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr:
+ let msg = "Autosharding error: " & error
+ error "publish error", err = msg
+ return RestApiResponse.badRequest("Failed to publish. " & msg)
+
# if RLN is mounted, append the proof to the message
if not node.wakuRlnRelay.isNil():
node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr:
diff --git a/waku/waku_core/topics/content_topic.nim b/waku/waku_core/topics/content_topic.nim
index b897c4c44..5984a760b 100644
--- a/waku/waku_core/topics/content_topic.nim
+++ b/waku/waku_core/topics/content_topic.nim
@@ -122,6 +122,18 @@ proc parse*(
"Invalid content topic structure. Expected either //// or /////"
return err(ParsingError.invalidFormat(errMsg))
+proc parse*(
+ T: type NsContentTopic, topics: seq[ContentTopic]
+): ParsingResult[seq[NsContentTopic]] =
+ var res: seq[NsContentTopic] = @[]
+ for contentTopic in topics:
+ let parseRes = NsContentTopic.parse(contentTopic)
+ if parseRes.isErr():
+ let error: ParsingError = parseRes.error
+ return ParsingResult[seq[NsContentTopic]].err(error)
+ res.add(parseRes.value)
+ return ParsingResult[seq[NsContentTopic]].ok(res)
+
# Content topic compatibility
converter toContentTopic*(topic: NsContentTopic): ContentTopic =
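
The new overload parses a whole sequence of content topics and stops at the first failure. A hedged, standalone sketch of that behaviour, with `parseOne` standing in for `NsContentTopic.parse`:

import std/strutils
import results

proc parseOne(topic: string): Result[string, string] =
  if topic.startsWith("/"):
    ok(topic)
  else:
    err("invalid content topic: " & topic)

proc parseAll(topics: seq[string]): Result[seq[string], string] =
  var res: seq[string] = @[]
  for t in topics:
    res.add(?parseOne(t)) # `?` short-circuits on the first parsing error
  ok(res)

assert parseAll(@["/app/1/chat/proto", "/app/1/ping/proto"]).isOk()
assert parseAll(@["/app/1/chat/proto", "not-a-topic"]).isErr()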
diff --git a/waku/waku_core/topics/sharding.nim b/waku/waku_core/topics/sharding.nim
index 4a4af4cb5..d2f652161 100644
--- a/waku/waku_core/topics/sharding.nim
+++ b/waku/waku_core/topics/sharding.nim
@@ -8,6 +8,7 @@ import nimcrypto, std/options, std/tables, stew/endians2, results, stew/byteutil
import ./content_topic, ./pubsub_topic
+# TODO: this is autosharding, not just "sharding"
type Sharding* = object
clusterId*: uint16
# TODO: generations could be stored in a table here
@@ -50,48 +51,32 @@ proc getShard*(s: Sharding, topic: ContentTopic): Result[RelayShard, string] =
ok(shard)
-proc parseSharding*(
- s: Sharding,
- pubsubTopic: Option[PubsubTopic],
- contentTopics: ContentTopic | seq[ContentTopic],
+proc getShardsFromContentTopics*(
+ s: Sharding, contentTopics: ContentTopic | seq[ContentTopic]
): Result[Table[RelayShard, seq[NsContentTopic]], string] =
- var topics: seq[ContentTopic]
- when contentTopics is seq[ContentTopic]:
- topics = contentTopics
- else:
- topics = @[contentTopics]
+ let topics =
+ when contentTopics is seq[ContentTopic]:
+ contentTopics
+ else:
+ @[contentTopics]
+
+ let parseRes = NsContentTopic.parse(topics)
+ let nsContentTopics =
+ if parseRes.isErr():
+ return err("Cannot parse content topic: " & $parseRes.error)
+ else:
+ parseRes.get()
var topicMap = initTable[RelayShard, seq[NsContentTopic]]()
- for contentTopic in topics:
- let parseRes = NsContentTopic.parse(contentTopic)
+ for content in nsContentTopics:
+ let shard = s.getShard(content).valueOr:
+ return err("Cannot deduce shard from content topic: " & $error)
- let content =
- if parseRes.isErr():
- return err("Cannot parse content topic: " & $parseRes.error)
- else:
- parseRes.get()
-
- let pubsub =
- if pubsubTopic.isSome():
- let parseRes = RelayShard.parse(pubsubTopic.get())
-
- if parseRes.isErr():
- return err("Cannot parse pubsub topic: " & $parseRes.error)
- else:
- parseRes.get()
- else:
- let shardsRes = s.getShard(content)
-
- if shardsRes.isErr():
- return err("Cannot autoshard content topic: " & $shardsRes.error)
- else:
- shardsRes.get()
-
- if not topicMap.hasKey(pubsub):
- topicMap[pubsub] = @[]
+ if not topicMap.hasKey(shard):
+ topicMap[shard] = @[]
try:
- topicMap[pubsub].add(content)
+ topicMap[shard].add(content)
except CatchableError:
return err(getCurrentExceptionMsg())
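
After this change, `getShardsFromContentTopics` only handles the autosharding case: it deduces one shard per content topic and groups the topics by shard. A standalone sketch of that grouping; the shard derivation below (hash modulo shard count) is illustrative only, not the real autosharding function.

import std/[hashes, tables]

proc deduceShard(contentTopic: string, shardCount: uint64): uint16 =
  uint16(cast[uint64](hash(contentTopic)) mod shardCount)

proc groupByShard(topics: seq[string], shardCount: uint64): Table[uint16, seq[string]] =
  result = initTable[uint16, seq[string]]()
  for t in topics:
    result.mgetOrPut(deduceShard(t, shardCount), @[]).add(t)

let topicMap = groupByShard(@["/app/1/chat/proto", "/app/1/ping/proto"], 8)
for shard, topics in topicMap.pairs:
  echo "shard ", shard, " -> ", topics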
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index 7831fb20f..45dc7c3c1 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -26,12 +26,19 @@ type WakuLightPush* = ref object of LPProtocol
peerManager*: PeerManager
pushHandler*: PushMessageHandler
requestRateLimiter*: RequestRateLimiter
- sharding: Sharding
+ autoSharding: Option[Sharding]
proc handleRequest(
- wl: WakuLightPush, peerId: PeerId, pushRequest: LightPushRequest
+ wl: WakuLightPush, peerId: PeerId, pushRequest: LightpushRequest
): Future[WakuLightPushResult] {.async.} =
let pubsubTopic = pushRequest.pubSubTopic.valueOr:
+ if wl.autoSharding.isNone():
+ let msg = "Pubsub topic must be specified when static sharding is enabled"
+ error "lightpush request handling error", error = msg
+ return WakuLightPushResult.err(
+ (code: LightpushStatusCode.INVALID_MESSAGE_ERROR, desc: some(msg))
+ )
+
let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
@@ -39,8 +46,8 @@ proc handleRequest(
(code: LightPushStatusCode.INVALID_MESSAGE_ERROR, desc: some(msg))
)
- wl.sharding.getShard(parsedTopic).valueOr:
- let msg = "Sharding error: " & error
+ wl.autoSharding.get().getShard(parsedTopic).valueOr:
+ let msg = "Auto-sharding error: " & error
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
(code: LightPushStatusCode.INTERNAL_SERVER_ERROR, desc: some(msg))
@@ -149,7 +156,7 @@ proc new*(
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
pushHandler: PushMessageHandler,
- sharding: Sharding,
+ autoSharding: Option[Sharding],
rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
let wl = WakuLightPush(
@@ -157,7 +164,7 @@ proc new*(
peerManager: peerManager,
pushHandler: pushHandler,
requestRateLimiter: newRequestRateLimiter(rateLimitSetting),
- sharding: sharding,
+ autoSharding: autoSharding,
)
wl.initProtocolHandler()
setServiceLimitMetric(WakuLightpushCodec, rateLimitSetting)
diff --git a/waku/waku_metadata/protocol.nim b/waku/waku_metadata/protocol.nim
index 13a2916b3..75f021dbe 100644
--- a/waku/waku_metadata/protocol.nim
+++ b/waku/waku_metadata/protocol.nim
@@ -29,7 +29,7 @@ proc respond(
m: WakuMetadata, conn: Connection
): Future[Result[void, string]] {.async, gcsafe.} =
let response =
- WakuMetadataResponse(clusterId: some(m.clusterId), shards: toSeq(m.shards))
+ WakuMetadataResponse(clusterId: some(m.clusterId.uint32), shards: toSeq(m.shards))
let res = catch:
await conn.writeLP(response.encode().buffer)
From dde023eacf11dae7311275103615e115fe424c7e Mon Sep 17 00:00:00 2001
From: gabrielmer <101006718+gabrielmer@users.noreply.github.com>
Date: Fri, 4 Jul 2025 15:08:15 +0200
Subject: [PATCH 44/47] fix: static sharding setup (#3494)
---
waku/factory/waku.nim | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index d733c6bf5..137e3531c 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -127,8 +127,12 @@ proc setupAppCallbacks(
if node.wakuRelay.isNil():
return err("Cannot configure relayHandler callback without Relay mounted")
- let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
- return err("Could not get autoshards: " & error)
+ let autoShards =
+ if node.wakuAutoSharding.isSome():
+ node.getAutoshards(conf.contentTopics).valueOr:
+ return err("Could not get autoshards: " & error)
+ else:
+ @[]
let confShards = conf.subscribeShards.mapIt(
RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
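
With `wakuAutoSharding` optional, the callback setup derives auto-shards only when autosharding is mounted and otherwise falls back to an empty list, so only the explicitly configured shards are subscribed. A small standalone sketch of that fallback, with `getAutoshards` as a stand-in for the node helper:

type RelayShard = object
  clusterId: uint16
  shardId: uint16

proc getAutoshards(contentTopics: seq[string]): seq[RelayShard] =
  # stand-in: one fake shard per content topic
  for i in 0 ..< contentTopics.len:
    result.add(RelayShard(clusterId: 1, shardId: uint16(i)))

let autoshardingMounted = false # i.e. wakuAutoSharding.isNone()
let autoShards =
  if autoshardingMounted:
    getAutoshards(@["/app/1/chat/proto"])
  else:
    newSeq[RelayShard]()

assert autoShards.len == 0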
From b713b6e5f454847fe4f24c55497644ec02a784ed Mon Sep 17 00:00:00 2001
From: Darshan K <35736874+darshankabariya@users.noreply.github.com>
Date: Tue, 8 Jul 2025 18:25:36 +0530
Subject: [PATCH 45/47] fix: make test configuration (#3480)
---
.github/workflows/ci.yml | 5 +++--
Makefile | 21 +++++++++++++++++----
tests/all_tests_waku.nim | 6 ++----
waku.nimble | 9 ++++++---
4 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 41f5500a0..30708ead9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -119,12 +119,13 @@ jobs:
sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18
postgres_enabled=1
fi
-
+
export MAKEFLAGS="-j1"
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
export USE_LIBBACKTRACE=0
- make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
+ make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test
+ make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2
build-docker-image:
needs: changes
diff --git a/Makefile b/Makefile
index 555a20472..e41b4207a 100644
--- a/Makefile
+++ b/Makefile
@@ -53,7 +53,19 @@ endif
# default target, because it's the first one that doesn't start with '.'
all: | wakunode2 example2 chat2 chat2bridge libwaku
-test: | testcommon testwaku
+TEST_FILE := $(word 2,$(MAKECMDGOALS))
+TEST_NAME := $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))
+
+test:
+ifeq ($(strip $(TEST_FILE)),)
+ $(MAKE) testcommon
+ $(MAKE) testwaku
+else
+ $(MAKE) compile-test $(TEST_FILE) $(TEST_NAME)
+endif
+# this prevents make from erroring on unknown targets like "Index"
+%:
+ @true
waku.nims:
ln -s waku.nimble $@
@@ -244,9 +256,10 @@ build/%: | build deps librln
echo -e $(BUILD_MSG) "build/$*" && \
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*
-test/%: | build deps librln
- echo -e $(BUILD_MSG) "test/$*" && \
- $(ENV_SCRIPT) nim testone $(NIM_PARAMS) waku.nims $*
+compile-test: | build deps librln
+ echo -e $(BUILD_MSG) "$(TEST_FILE)" && \
+ $(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \
+ $(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "$(TEST_NAME)"
################
## Waku tools ##
diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim
index 98ea0e36f..aac92863a 100644
--- a/tests/all_tests_waku.nim
+++ b/tests/all_tests_waku.nim
@@ -38,7 +38,8 @@ when os == "Linux" and
#./waku_archive_legacy/test_driver_postgres_query,
#./waku_archive_legacy/test_driver_postgres,
./factory/test_node_factory,
- ./wakunode_rest/test_rest_store
+ ./wakunode_rest/test_rest_store,
+ ./wakunode_rest/test_all
# Waku store test suite
import
@@ -91,9 +92,6 @@ import
# Waku Keystore test suite
import ./test_waku_keystore_keyfile, ./test_waku_keystore
-## Wakunode Rest API test suite
-import ./wakunode_rest/test_all
-
import ./waku_rln_relay/test_all
# Node Factory
diff --git a/waku.nimble b/waku.nimble
index 3790b0333..3d2d7de88 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -161,10 +161,13 @@ task buildone, "Build custom target":
let filepath = paramStr(paramCount())
discard buildModule filepath
-task testone, "Test custom target":
+task buildTest, "Test custom target":
let filepath = paramStr(paramCount())
- if buildModule(filepath):
- exec "build/" & filepath & ".bin"
+ discard buildModule(filepath)
+
+task execTest, "Run test":
+ let filepath = paramStr(paramCount() - 1)
+ exec "build/" & filepath & ".bin" & " test \"" & paramStr(paramCount()) & "\""
### C Bindings
let chroniclesParams =
From 4e527ee04540fdc0ae651e9c0df80790abb1073b Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Wed, 9 Jul 2025 15:57:38 +1000
Subject: [PATCH 46/47] chore: use type for rate limit config (#3489)
* chore: use type for rate limit config
Use a dedicated type instead of `seq[string]` for the rate limit config earlier in the flow.
This enables failing faster (at config time) if the string is malformed.
It also enables using the typed object directly in some scenarios.
* test: remove import warnings
* improve naming and add tests
---
apps/chat2bridge/chat2bridge.nim | 1 +
tests/common/test_all.nim | 2 ++
tests/factory/test_all.nim | 2 ++
tests/factory/test_waku_conf.nim | 15 ++++++++++
tests/incentivization/test_all.nim | 2 ++
tests/node/test_all.nim | 2 ++
tests/waku_lightpush/test_all.nim | 2 ++
tests/waku_lightpush_legacy/test_all.nim | 2 ++
tests/waku_peer_exchange/test_all.nim | 2 ++
waku/factory/builder.nim | 10 +++----
waku/factory/conf_builder/conf_builder.nim | 3 +-
.../conf_builder/rate_limit_conf_builder.nim | 29 +++++++++++++++++++
.../conf_builder/waku_conf_builder.nim | 15 +++++-----
waku/factory/external_config.nim | 2 +-
waku/factory/node_factory.nim | 2 +-
waku/factory/waku_conf.nim | 4 +--
waku/node/waku_node.nim | 10 ++-----
17 files changed, 79 insertions(+), 26 deletions(-)
create mode 100644 waku/factory/conf_builder/rate_limit_conf_builder.nim
diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim
index a62d98261..c2bf9c032 100644
--- a/apps/chat2bridge/chat2bridge.nim
+++ b/apps/chat2bridge/chat2bridge.nim
@@ -23,6 +23,7 @@ import
waku_store,
factory/builder,
common/utils/matterbridge_client,
+ common/rate_limit/setting,
],
# Chat 2 imports
../chat2/chat2,
diff --git a/tests/common/test_all.nim b/tests/common/test_all.nim
index 7756f23ad..ae37337cd 100644
--- a/tests/common/test_all.nim
+++ b/tests/common/test_all.nim
@@ -1,3 +1,5 @@
+{.used.}
+
import
./test_base64_codec,
./test_confutils_envvar,
diff --git a/tests/factory/test_all.nim b/tests/factory/test_all.nim
index b704a8ef3..683bc3b10 100644
--- a/tests/factory/test_all.nim
+++ b/tests/factory/test_all.nim
@@ -1 +1,3 @@
+{.used.}
+
import ./test_external_config, ./test_node_factory, ./test_waku_conf
diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim
index 436eb4e40..7ecdb01bb 100644
--- a/tests/factory/test_waku_conf.nim
+++ b/tests/factory/test_waku_conf.nim
@@ -281,3 +281,18 @@ suite "Waku Conf - extMultiaddrs":
)
for m in multiaddrs:
check m in resMultiaddrs
+
+suite "Waku Conf Builder - rate limits":
+ test "Valid rate limit passed via string":
+ ## Setup
+ var builder = RateLimitConfBuilder.init()
+
+ ## Given
+ let rateLimitsStr = @["lightpush:2/2ms", "10/2m", "store: 3/3s"]
+ builder.withRateLimits(rateLimitsStr)
+
+ ## When
+ let res = builder.build()
+
+ ## Then
+ assert res.isOk(), $res.error
diff --git a/tests/incentivization/test_all.nim b/tests/incentivization/test_all.nim
index 4657ea0d3..dc488c4da 100644
--- a/tests/incentivization/test_all.nim
+++ b/tests/incentivization/test_all.nim
@@ -1 +1,3 @@
+{.used.}
+
import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation
diff --git a/tests/node/test_all.nim b/tests/node/test_all.nim
index 4840f49a2..f6e7507b7 100644
--- a/tests/node/test_all.nim
+++ b/tests/node/test_all.nim
@@ -1,3 +1,5 @@
+{.used.}
+
import
./test_wakunode_filter,
./test_wakunode_legacy_lightpush,
diff --git a/tests/waku_lightpush/test_all.nim b/tests/waku_lightpush/test_all.nim
index 4e4980929..b5edd72fb 100644
--- a/tests/waku_lightpush/test_all.nim
+++ b/tests/waku_lightpush/test_all.nim
@@ -1 +1,3 @@
+{.used.}
+
import ./test_client, ./test_ratelimit
diff --git a/tests/waku_lightpush_legacy/test_all.nim b/tests/waku_lightpush_legacy/test_all.nim
index 4e4980929..b5edd72fb 100644
--- a/tests/waku_lightpush_legacy/test_all.nim
+++ b/tests/waku_lightpush_legacy/test_all.nim
@@ -1 +1,3 @@
+{.used.}
+
import ./test_client, ./test_ratelimit
diff --git a/tests/waku_peer_exchange/test_all.nim b/tests/waku_peer_exchange/test_all.nim
index 069de6e7b..903b47cb9 100644
--- a/tests/waku_peer_exchange/test_all.nim
+++ b/tests/waku_peer_exchange/test_all.nim
@@ -1 +1,3 @@
+{.used.}
+
import ./test_protocol, ./test_rpc_codec
diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim
index b05d5d054..772cfbffd 100644
--- a/waku/factory/builder.nim
+++ b/waku/factory/builder.nim
@@ -43,8 +43,8 @@ type
switchSendSignedPeerRecord: Option[bool]
circuitRelay: Relay
- #Rate limit configs for non-relay req-resp protocols
- rateLimitSettings: Option[seq[string]]
+ # Rate limit configs for non-relay req-resp protocols
+ rateLimitSettings: Option[ProtocolRateLimitSettings]
WakuNodeBuilderResult* = Result[void, string]
@@ -127,7 +127,7 @@ proc withPeerManagerConfig*(
proc withColocationLimit*(builder: var WakuNodeBuilder, colocationLimit: int) =
builder.colocationLimit = colocationLimit
-proc withRateLimit*(builder: var WakuNodeBuilder, limits: seq[string]) =
+proc withRateLimit*(builder: var WakuNodeBuilder, limits: ProtocolRateLimitSettings) =
builder.rateLimitSettings = some(limits)
proc withCircuitRelay*(builder: var WakuNodeBuilder, circuitRelay: Relay) =
@@ -219,11 +219,9 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
switch = switch,
peerManager = peerManager,
rng = rng,
+ rateLimitSettings = builder.rateLimitSettings.get(DefaultProtocolRateLimit),
)
except Exception:
return err("failed to build WakuNode instance: " & getCurrentExceptionMsg())
- if builder.rateLimitSettings.isSome():
- ?node.setRateLimits(builder.rateLimitSettings.get())
-
ok(node)
diff --git a/waku/factory/conf_builder/conf_builder.nim b/waku/factory/conf_builder/conf_builder.nim
index 9b7f44ada..14b762756 100644
--- a/waku/factory/conf_builder/conf_builder.nim
+++ b/waku/factory/conf_builder/conf_builder.nim
@@ -8,10 +8,11 @@ import
./discv5_conf_builder,
./web_socket_conf_builder,
./metrics_server_conf_builder,
+ ./rate_limit_conf_builder,
./rln_relay_conf_builder
export
waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
- rln_relay_conf_builder
+ rate_limit_conf_builder, rln_relay_conf_builder
diff --git a/waku/factory/conf_builder/rate_limit_conf_builder.nim b/waku/factory/conf_builder/rate_limit_conf_builder.nim
new file mode 100644
index 000000000..0d466a132
--- /dev/null
+++ b/waku/factory/conf_builder/rate_limit_conf_builder.nim
@@ -0,0 +1,29 @@
+import chronicles, std/[net, options], results
+import waku/common/rate_limit/setting
+
+logScope:
+ topics = "waku conf builder rate limit"
+
+type RateLimitConfBuilder* = object
+ strValue: Option[seq[string]]
+ objValue: Option[ProtocolRateLimitSettings]
+
+proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder =
+ RateLimitConfBuilder()
+
+proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) =
+ b.strValue = some(rateLimits)
+
+proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] =
+ if b.strValue.isSome() and b.objValue.isSome():
+ return err("Rate limits conf must only be set once on the builder")
+
+ if b.objValue.isSome():
+ return ok(b.objValue.get())
+
+ if b.strValue.isSome():
+ let rateLimits = ProtocolRateLimitSettings.parse(b.strValue.get()).valueOr:
+ return err("Invalid rate limits settings:" & $error)
+ return ok(rateLimits)
+
+ return ok(DefaultProtocolRateLimit)
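
A usage sketch for the new builder, assuming the module paths introduced in this patch: rate-limit strings are parsed once, when the configuration is built, so a malformed entry fails here instead of after the node has started.

import results
import waku/factory/conf_builder/rate_limit_conf_builder

var rlBuilder = RateLimitConfBuilder.init()
rlBuilder.withRateLimits(@["lightpush:2/2ms", "store:3/3s"])
let settings = rlBuilder.build().valueOr:
  quit("invalid rate limits: " & error)
# `settings` is the typed ProtocolRateLimitSettings, handed to WakuNode.new via the conf.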
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index ee7ca1b8c..32631e1d7 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -23,6 +23,7 @@ import
./discv5_conf_builder,
./web_socket_conf_builder,
./metrics_server_conf_builder,
+ ./rate_limit_conf_builder,
./rln_relay_conf_builder
logScope:
@@ -74,6 +75,7 @@ type WakuConfBuilder* = object
rlnRelayConf*: RlnRelayConfBuilder
storeServiceConf*: StoreServiceConfBuilder
webSocketConf*: WebSocketConfBuilder
+ rateLimitConf*: RateLimitConfBuilder
# End conf builders
relay: Option[bool]
lightPush: Option[bool]
@@ -116,8 +118,6 @@ type WakuConfBuilder* = object
agentString: Option[string]
- rateLimits: Option[seq[string]]
-
maxRelayPeers: Option[int]
relayShardedPeerManagement: Option[bool]
relayServiceRatio: Option[string]
@@ -134,6 +134,7 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder =
rlnRelayConf: RlnRelayConfBuilder.init(),
storeServiceConf: StoreServiceConfBuilder.init(),
webSocketConf: WebSocketConfBuilder.init(),
+ rateLimitConf: RateLimitConfBuilder.init(),
)
proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
@@ -241,9 +242,6 @@ proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
b.colocationLimit = some(colocationLimit)
-proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) =
- b.rateLimits = some(rateLimits)
-
proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
b.maxRelayPeers = some(maxRelayPeers)
@@ -489,6 +487,10 @@ proc build*(
let webSocketConf = builder.webSocketConf.build().valueOr:
return err("WebSocket Conf building failed: " & $error)
+
+ let rateLimit = builder.rateLimitConf.build().valueOr:
+ return err("Rate limits Conf building failed: " & $error)
+
# End - Build sub-configs
let logLevel =
@@ -583,7 +585,6 @@ proc build*(
# TODO: use `DefaultColocationLimit`. the user of this value should
# probably be defining a config object
let colocationLimit = builder.colocationLimit.get(5)
- let rateLimits = builder.rateLimits.get(newSeq[string](0))
# TODO: is there a strategy for experimental features? delete vs promote
let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
@@ -643,7 +644,7 @@ proc build*(
colocationLimit: colocationLimit,
maxRelayPeers: builder.maxRelayPeers,
relayServiceRatio: builder.relayServiceRatio.get("60:40"),
- rateLimits: rateLimits,
+ rateLimit: rateLimit,
circuitRelayClient: builder.circuitRelayClient.get(false),
staticNodes: builder.staticNodes,
relayShardedPeerManagement: relayShardedPeerManagement,
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index 2d7205e87..43b37b01a 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -1016,6 +1016,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
b.webSocketConf.withCertPath(n.websocketSecureCertPath)
- b.withRateLimits(n.rateLimits)
+ b.rateLimitConf.withRateLimits(n.rateLimits)
return b.build()
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 5e038ee0d..95693cc79 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -122,7 +122,7 @@ proc initNode(
relayServiceRatio = conf.relayServiceRatio,
shardAware = conf.relayShardedPeerManagement,
)
- builder.withRateLimit(conf.rateLimits)
+ builder.withRateLimit(conf.rateLimit)
builder.withCircuitRelay(relay)
let node =
diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim
index 6ffda1c14..4a0504906 100644
--- a/waku/factory/waku_conf.nim
+++ b/waku/factory/waku_conf.nim
@@ -12,6 +12,7 @@ import
../discovery/waku_discv5,
../node/waku_metrics,
../common/logging,
+ ../common/rate_limit/setting,
../waku_enr/capabilities,
./networks_config
@@ -127,8 +128,7 @@ type WakuConf* {.requiresInit.} = ref object
colocationLimit*: int
- # TODO: use proper type
- rateLimits*: seq[string]
+ rateLimit*: ProtocolRateLimitSettings
# TODO: those could be in a relay conf object
maxRelayPeers*: Option[int]
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index ccd62664f..b507b385e 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -128,6 +128,7 @@ proc new*(
enr: enr.Record,
switch: Switch,
peerManager: PeerManager,
+ rateLimitSettings: ProtocolRateLimitSettings = DefaultProtocolRateLimit,
# TODO: make this argument required after tests are updated
rng: ref HmacDrbgContext = crypto.newRng(),
): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
@@ -144,7 +145,7 @@ proc new*(
enr: enr,
announcedAddresses: netConfig.announcedAddresses,
topicSubscriptionQueue: queue,
- rateLimitSettings: DefaultProtocolRateLimit,
+ rateLimitSettings: rateLimitSettings,
)
return node
@@ -1563,10 +1564,3 @@ proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} =
return true
return await node.wakuRlnRelay.isReady()
## TODO: add other protocol `isReady` checks
-
-proc setRateLimits*(node: WakuNode, limits: seq[string]): Result[void, string] =
- let rateLimitConfig = ProtocolRateLimitSettings.parse(limits)
- if rateLimitConfig.isErr():
- return err("invalid rate limit settings:" & rateLimitConfig.error)
- node.rateLimitSettings = rateLimitConfig.get()
- return ok()
From 3133aaaf71a5feb0c30bcdd4638f320514b239e6 Mon Sep 17 00:00:00 2001
From: fryorcraken <110212804+fryorcraken@users.noreply.github.com>
Date: Thu, 10 Jul 2025 10:56:02 +1000
Subject: [PATCH 47/47] chore: use distinct type for Light push status codes
(#3488)
* chore: use distinct type for Light push status codes
* Make naming more explicit
* test: use new light push error code in tests
* fix missed line
* fix thing
---
tests/node/test_wakunode_lightpush.nim | 6 +--
tests/waku_lightpush/test_client.nim | 15 +++----
tests/waku_lightpush/test_ratelimit.nim | 2 +-
waku/node/waku_node.nim | 16 +++++---
waku/waku_api/rest/lightpush/handlers.nim | 2 +-
waku/waku_lightpush/callbacks.nim | 4 +-
waku/waku_lightpush/client.nim | 11 +++--
waku/waku_lightpush/common.nim | 49 ++++++++++++-----------
waku/waku_lightpush/protocol.nim | 20 +++++----
waku/waku_lightpush/rpc.nim | 6 ++-
waku/waku_lightpush/rpc_codec.nim | 4 +-
11 files changed, 73 insertions(+), 62 deletions(-)
diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim
index 6a42c899b..ee68710d1 100644
--- a/tests/node/test_wakunode_lightpush.nim
+++ b/tests/node/test_wakunode_lightpush.nim
@@ -76,7 +76,7 @@ suite "Waku Lightpush - End To End":
# Then the message is not relayed but not due to RLN
assert publishResponse.isErr(), "We expect an error response"
- assert (publishResponse.error.code == NO_PEERS_TO_RELAY),
+ assert (publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY),
"incorrect error response"
suite "Waku LightPush Validation Tests":
@@ -93,7 +93,7 @@ suite "Waku Lightpush - End To End":
check:
publishResponse.isErr()
- publishResponse.error.code == INVALID_MESSAGE_ERROR
+ publishResponse.error.code == LightPushErrorCode.INVALID_MESSAGE
publishResponse.error.desc ==
some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes")
@@ -168,7 +168,7 @@ suite "RLN Proofs as a Lightpush Service":
# Then the message is not relayed but not due to RLN
assert publishResponse.isErr(), "We expect an error response"
- check publishResponse.error.code == NO_PEERS_TO_RELAY
+ check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY
suite "Waku Lightpush message delivery":
asyncTest "lightpush message flow succeed":
diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim
index d7a1b6b24..af22ffa5d 100644
--- a/tests/waku_lightpush/test_client.nim
+++ b/tests/waku_lightpush/test_client.nim
@@ -42,8 +42,9 @@ suite "Waku Lightpush Client":
): Future[WakuLightPushResult] {.async.} =
let msgLen = message.encode().buffer.len
if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024:
- return
- lighpushErrorResult(PAYLOAD_TOO_LARGE, "length greater than maxMessageSize")
+ return lighpushErrorResult(
+ LightPushErrorCode.PAYLOAD_TOO_LARGE, "length greater than maxMessageSize"
+ )
handlerFuture.complete((pubsubTopic, message))
# return that we published the message to 1 peer.
return ok(1)
@@ -263,7 +264,7 @@ suite "Waku Lightpush Client":
# Then the message is not received by the server
check:
publishResponse5.isErr()
- publishResponse5.error.code == PAYLOAD_TOO_LARGE
+ publishResponse5.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
(await handlerFuture.waitForResult()).isErr()
asyncTest "Invalid Encoding Payload":
@@ -276,7 +277,7 @@ suite "Waku Lightpush Client":
# And the error is returned
check:
publishResponse.requestId == "N/A"
- publishResponse.statusCode == LightpushStatusCode.BAD_REQUEST.uint32
+ publishResponse.statusCode == LightPushErrorCode.BAD_REQUEST
publishResponse.statusDesc.isSome()
scanf(publishResponse.statusDesc.get(), decodeRpcFailure)
@@ -289,7 +290,7 @@ suite "Waku Lightpush Client":
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
): Future[WakuLightPushResult] {.async.} =
handlerFuture2.complete()
- return lighpushErrorResult(PAYLOAD_TOO_LARGE, handlerError)
+ return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError)
let
serverSwitch2 = newTestSwitch()
@@ -305,7 +306,7 @@ suite "Waku Lightpush Client":
# Then the response is negative
check:
- publishResponse.error.code == PAYLOAD_TOO_LARGE
+ publishResponse.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
publishResponse.error.desc == some(handlerError)
(await handlerFuture2.waitForResult()).isOk()
@@ -369,4 +370,4 @@ suite "Waku Lightpush Client":
# Then the response is negative
check not publishResponse.isOk()
- check publishResponse.error.code == LightpushStatusCode.NO_PEERS_TO_RELAY
+ check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY
diff --git a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim
index 0dd7913d1..b2dcdc7b5 100644
--- a/tests/waku_lightpush/test_ratelimit.nim
+++ b/tests/waku_lightpush/test_ratelimit.nim
@@ -119,7 +119,7 @@ suite "Rate limited push service":
check:
requestRes.isErr()
- requestRes.error.code == TOO_MANY_REQUESTS
+ requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS
requestRes.error.desc == some(TooManyRequestsMessage)
for testCnt in 0 .. 2:
diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index b507b385e..db689e8a0 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -1191,7 +1191,9 @@ proc lightpushPublish*(
): Future[lightpush_protocol.WakuLightPushResult] {.async.} =
if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil():
error "failed to publish message as lightpush not available"
- return lighpushErrorResult(SERVICE_NOT_AVAILABLE, "Waku lightpush not available")
+ return lighpushErrorResult(
+ LightPushErrorCode.SERVICE_NOT_AVAILABLE, "Waku lightpush not available"
+ )
let toPeer: RemotePeerInfo = peerOpt.valueOr:
if not node.wakuLightPush.isNil():
@@ -1200,25 +1202,27 @@ proc lightpushPublish*(
node.peerManager.selectPeer(WakuLightPushCodec).valueOr:
let msg = "no suitable remote peers"
error "failed to publish message", msg = msg
- return lighpushErrorResult(NO_PEERS_TO_RELAY, msg)
+ return lighpushErrorResult(LightPushErrorCode.NO_PEERS_TO_RELAY, msg)
else:
- return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
+ return lighpushErrorResult(
+ LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
+ )
let pubsubForPublish = pubSubTopic.valueOr:
if node.wakuAutoSharding.isNone():
let msg = "Pubsub topic must be specified when static sharding is enabled"
error "lightpush publish error", error = msg
- return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
+ return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)
let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
- return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
+ return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)
node.wakuAutoSharding.get().getShard(parsedTopic).valueOr:
let msg = "Autosharding error: " & error
error "lightpush publish error", error = msg
- return lighpushErrorResult(INTERNAL_SERVER_ERROR, msg)
+ return lighpushErrorResult(LightPushErrorCode.INTERNAL_SERVER_ERROR, msg)
return await lightpushPublishHandler(node, pubsubForPublish, message, toPeer)
diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim
index cafcd89d2..a724aa1c9 100644
--- a/waku/waku_api/rest/lightpush/handlers.nim
+++ b/waku/waku_api/rest/lightpush/handlers.nim
@@ -32,7 +32,7 @@ const NoPeerNoneFoundError = "No suitable service peer & none discovered"
proc useSelfHostedLightPush(node: WakuNode): bool =
return node.wakuLightPush != nil and node.wakuLightPushClient == nil
-proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode =
+proc convertErrorKindToHttpStatus(statusCode: LightPushStatusCode): HttpCode =
## Lightpush status codes are matching HTTP status codes by design
return toHttpCode(statusCode.int).get(Http500)
diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim
index 3cfc3fe90..4b362e6bb 100644
--- a/waku/waku_lightpush/callbacks.nim
+++ b/waku/waku_lightpush/callbacks.nim
@@ -44,10 +44,10 @@ proc getRelayPushHandler*(
): Future[WakuLightPushResult] {.async.} =
# append RLN proof
let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr:
- return lighpushErrorResult(OUT_OF_RLN_PROOF, error)
+ return lighpushErrorResult(LightPushErrorCode.OUT_OF_RLN_PROOF, error)
(await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr:
- return lighpushErrorResult(INVALID_MESSAGE_ERROR, $error)
+ return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error)
let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof)
diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim
index 2f03b0847..efb330d91 100644
--- a/waku/waku_lightpush/client.nim
+++ b/waku/waku_lightpush/client.nim
@@ -35,7 +35,8 @@ proc sendPushRequest(
let connection = (await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr:
waku_lightpush_v3_errors.inc(labelValues = [dialFailure])
return lighpushErrorResult(
- NO_PEERS_TO_RELAY, dialFailure & ": " & $peer & " is not accessible"
+ LightPushErrorCode.NO_PEERS_TO_RELAY,
+ dialFailure & ": " & $peer & " is not accessible",
)
await connection.writeLP(req.encode().buffer)
@@ -44,7 +45,7 @@ proc sendPushRequest(
try:
buffer = await connection.readLp(DefaultMaxRpcSize.int)
except LPStreamRemoteClosedError:
- error "Failed to read responose from peer", error = getCurrentExceptionMsg()
+ error "Failed to read response from peer", error = getCurrentExceptionMsg()
return lightpushResultInternalError(
"Failed to read response from peer: " & getCurrentExceptionMsg()
)
@@ -55,7 +56,7 @@ proc sendPushRequest(
return lightpushResultInternalError(decodeRpcFailure)
if response.requestId != req.requestId and
- response.statusCode != TOO_MANY_REQUESTS.uint32:
+ response.statusCode != LightPushErrorCode.TOO_MANY_REQUESTS:
error "response failure, requestId mismatch",
requestId = req.requestId, responseRequestId = response.requestId
return lightpushResultInternalError("response failure, requestId mismatch")
@@ -105,7 +106,9 @@ proc publishToAny*(
let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr:
# TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side?
- return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
+ return lighpushErrorResult(
+ LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
+ )
let pushRequest = LightpushRequest(
requestId: generateRequestId(wl.rng),
diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim
index 4c2984e8f..f2687834e 100644
--- a/waku/waku_lightpush/common.nim
+++ b/waku/waku_lightpush/common.nim
@@ -5,18 +5,21 @@ import ../waku_core, ./rpc, ../waku_relay/protocol
from ../waku_core/codecs import WakuLightPushCodec
export WakuLightPushCodec
+export LightPushStatusCode
-type LightpushStatusCode* = enum
- SUCCESS = uint32(200)
- BAD_REQUEST = uint32(400)
- PAYLOAD_TOO_LARGE = uint32(413)
- INVALID_MESSAGE_ERROR = uint32(420)
- UNSUPPORTED_PUBSUB_TOPIC = uint32(421)
- TOO_MANY_REQUESTS = uint32(429)
- INTERNAL_SERVER_ERROR = uint32(500)
- SERVICE_NOT_AVAILABLE = uint32(503)
- OUT_OF_RLN_PROOF = uint32(504)
- NO_PEERS_TO_RELAY = uint32(505)
+const LightPushSuccessCode* = (SUCCESS: LightPushStatusCode(200))
+
+const LightPushErrorCode* = (
+ BAD_REQUEST: LightPushStatusCode(400),
+ PAYLOAD_TOO_LARGE: LightPushStatusCode(413),
+ INVALID_MESSAGE: LightPushStatusCode(420),
+ UNSUPPORTED_PUBSUB_TOPIC: LightPushStatusCode(421),
+ TOO_MANY_REQUESTS: LightPushStatusCode(429),
+ INTERNAL_SERVER_ERROR: LightPushStatusCode(500),
+ SERVICE_NOT_AVAILABLE: LightPushStatusCode(503),
+ OUT_OF_RLN_PROOF: LightPushStatusCode(504),
+ NO_PEERS_TO_RELAY: LightPushStatusCode(505),
+)
type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]]
type WakuLightPushResult* = Result[uint32, ErrorStatus]
@@ -28,25 +31,25 @@ type PushMessageHandler* = proc(
const TooManyRequestsMessage* = "Request rejected due to too many requests"
func isSuccess*(response: LightPushResponse): bool =
- return response.statusCode == LightpushStatusCode.SUCCESS.uint32
+ return response.statusCode == LightPushSuccessCode.SUCCESS
func toPushResult*(response: LightPushResponse): WakuLightPushResult =
if isSuccess(response):
return ok(response.relayPeerCount.get(0))
else:
- return err((response.statusCode.LightpushStatusCode, response.statusDesc))
+ return err((response.statusCode, response.statusDesc))
func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult =
return ok(relayPeerCount)
func lightpushResultInternalError*(msg: string): WakuLightPushResult =
- return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg)))
+ return err((LightPushErrorCode.INTERNAL_SERVER_ERROR, some(msg)))
func lightpushResultBadRequest*(msg: string): WakuLightPushResult =
- return err((LightpushStatusCode.BAD_REQUEST, some(msg)))
+ return err((LightPushErrorCode.BAD_REQUEST, some(msg)))
func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult =
- return err((LightpushStatusCode.SERVICE_NOT_AVAILABLE, some(msg)))
+ return err((LightPushErrorCode.SERVICE_NOT_AVAILABLE, some(msg)))
func lighpushErrorResult*(
statusCode: LightpushStatusCode, desc: Option[string]
@@ -63,24 +66,22 @@ func mapPubishingErrorToPushResult*(
): WakuLightPushResult =
case publishOutcome
of NoTopicSpecified:
- return err(
- (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Empty topic, skipping publish"))
- )
+ return
+ err((LightPushErrorCode.INVALID_MESSAGE, some("Empty topic, skipping publish")))
of DuplicateMessage:
- return err(
- (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Dropping already-seen message"))
- )
+ return
+ err((LightPushErrorCode.INVALID_MESSAGE, some("Dropping already-seen message")))
of NoPeersToPublish:
return err(
(
- LightpushStatusCode.NO_PEERS_TO_RELAY,
+ LightPushErrorCode.NO_PEERS_TO_RELAY,
some("No peers for topic, skipping publish"),
)
)
of CannotGenerateMessageId:
return err(
(
- LightpushStatusCode.INTERNAL_SERVER_ERROR,
+ LightPushErrorCode.INTERNAL_SERVER_ERROR,
some("Error generating message id, skipping publish"),
)
)
diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim
index 45dc7c3c1..955b1ade5 100644
--- a/waku/waku_lightpush/protocol.nim
+++ b/waku/waku_lightpush/protocol.nim
@@ -36,21 +36,21 @@ proc handleRequest(
let msg = "Pubsub topic must be specified when static sharding is enabled"
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
- (code: LightpushStatusCode.INVALID_MESSAGE_ERROR, desc: some(msg))
+ (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg))
)
let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
- (code: LightPushStatusCode.INVALID_MESSAGE_ERROR, desc: some(msg))
+ (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg))
)
wl.autoSharding.get().getShard(parsedTopic).valueOr:
let msg = "Auto-sharding error: " & error
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
- (code: LightPushStatusCode.INTERNAL_SERVER_ERROR, desc: some(msg))
+ (code: LightPushErrorCode.INTERNAL_SERVER_ERROR, desc: some(msg))
)
# ensure checking topic will not cause error at gossipsub level
@@ -58,7 +58,7 @@ proc handleRequest(
let msg = "topic must not be empty"
error "lightpush request handling error", error = msg
return
- WakuLightPushResult.err((code: LightPushStatusCode.BAD_REQUEST, desc: some(msg)))
+ WakuLightPushResult.err((code: LightPushErrorCode.BAD_REQUEST, desc: some(msg)))
waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])
@@ -78,16 +78,14 @@ proc handleRequest(
proc handleRequest*(
wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]
): Future[LightPushResponse] {.async.} =
- var pushResponse: LightPushResponse
-
let pushRequest = LightPushRequest.decode(buffer).valueOr:
let desc = decodeRpcFailure & ": " & $error
error "failed to push message", error = desc
- let errorCode = LightPushStatusCode.BAD_REQUEST.uint32
+ let errorCode = LightPushErrorCode.BAD_REQUEST
waku_lightpush_v3_errors.inc(labelValues = [$errorCode])
return LightPushResponse(
requestId: "N/A", # due to decode failure we don't know requestId
- statusCode: errorCode.uint32,
+ statusCode: errorCode,
statusDesc: some(desc),
)
@@ -96,12 +94,12 @@ proc handleRequest*(
waku_lightpush_v3_errors.inc(labelValues = [$error.code])
error "failed to push message", error = desc
return LightPushResponse(
- requestId: pushRequest.requestId, statusCode: error.code.uint32, statusDesc: desc
+ requestId: pushRequest.requestId, statusCode: error.code, statusDesc: desc
)
return LightPushResponse(
requestId: pushRequest.requestId,
- statusCode: LightPushStatusCode.SUCCESS.uint32,
+ statusCode: LightPushSuccessCode.SUCCESS,
statusDesc: none[string](),
relayPeerCount: some(relayPeerCount),
)
@@ -135,7 +133,7 @@ proc initProtocolHandler(wl: WakuLightPush) =
## in reject case as it is comparably too expensive and opens possible
## attack surface
requestId: "N/A",
- statusCode: LightpushStatusCode.TOO_MANY_REQUESTS.uint32,
+ statusCode: LightPushErrorCode.TOO_MANY_REQUESTS,
statusDesc: some(TooManyRequestsMessage),
)
)
diff --git a/waku/waku_lightpush/rpc.nim b/waku/waku_lightpush/rpc.nim
index 5a1a6647d..f19563b99 100644
--- a/waku/waku_lightpush/rpc.nim
+++ b/waku/waku_lightpush/rpc.nim
@@ -3,6 +3,10 @@
import std/options
import ../waku_core
+type LightPushStatusCode* = distinct uint32
+proc `==`*(a, b: LightPushStatusCode): bool {.borrow.}
+proc `$`*(code: LightPushStatusCode): string {.borrow.}
+
type
LightpushRequest* = object
requestId*: string
@@ -11,6 +15,6 @@ type
LightPushResponse* = object
requestId*: string
- statusCode*: uint32
+ statusCode*: LightPushStatusCode
statusDesc*: Option[string]
relayPeerCount*: Option[uint32]
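
A standalone sketch of the pattern adopted in this patch: the status code becomes a `distinct uint32` with `==` and `$` borrowed from the base type, and the former enum members become named constants grouped in tuples, so call sites read like LightPushErrorCode.BAD_REQUEST while the wire format stays a plain uint32.

type StatusCode = distinct uint32
proc `==`(a, b: StatusCode): bool {.borrow.}
proc `$`(code: StatusCode): string {.borrow.}

const SuccessCode = (SUCCESS: StatusCode(200))
const ErrorCode =
  (BAD_REQUEST: StatusCode(400), TOO_MANY_REQUESTS: StatusCode(429))

let received = StatusCode(429)
assert received == ErrorCode.TOO_MANY_REQUESTS
assert $SuccessCode.SUCCESS == "200"
# encoding still writes the raw integer, mirroring `rpc.statusCode.uint32`
assert ErrorCode.BAD_REQUEST.uint32 == 400'u32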
diff --git a/waku/waku_lightpush/rpc_codec.nim b/waku/waku_lightpush/rpc_codec.nim
index 53bdda4c0..0a4f934d6 100644
--- a/waku/waku_lightpush/rpc_codec.nim
+++ b/waku/waku_lightpush/rpc_codec.nim
@@ -43,7 +43,7 @@ proc encode*(rpc: LightPushResponse): ProtoBuffer =
var pb = initProtoBuffer()
pb.write3(1, rpc.requestId)
- pb.write3(10, rpc.statusCode)
+ pb.write3(10, rpc.statusCode.uint32)
pb.write3(11, rpc.statusDesc)
pb.write3(12, rpc.relayPeerCount)
pb.finish3()
@@ -64,7 +64,7 @@ proc decode*(T: type LightPushResponse, buffer: seq[byte]): ProtobufResult[T] =
if not ?pb.getField(10, statusCode):
return err(ProtobufError.missingRequiredField("status_code"))
else:
- rpc.statusCode = statusCode
+ rpc.statusCode = statusCode.LightPushStatusCode
var statusDesc: string
if not ?pb.getField(11, statusDesc):