From 8397d45f51859ab39f691a43ed7adaa2f40f18b4 Mon Sep 17 00:00:00 2001 From: stubbsta Date: Mon, 10 Feb 2025 08:00:41 +0200 Subject: [PATCH 01/48] Update all references to RLN contract address --- apps/sonda/docker-compose.yml | 2 +- apps/sonda/register_rln.sh | 2 +- waku/factory/networks_config.nim | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/sonda/docker-compose.yml b/apps/sonda/docker-compose.yml index 2141bbfc8..d6594428e 100644 --- a/apps/sonda/docker-compose.yml +++ b/apps/sonda/docker-compose.yml @@ -9,7 +9,7 @@ x-logging: &logging x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-" x-rln-environment: &rln_env - RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3} + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6} RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" diff --git a/apps/sonda/register_rln.sh b/apps/sonda/register_rln.sh index ab660f1d8..4fb373b3a 100755 --- a/apps/sonda/register_rln.sh +++ b/apps/sonda/register_rln.sh @@ -24,7 +24,7 @@ fi docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \ --rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \ --rln-relay-eth-private-key=${ETH_TESTNET_KEY} \ ---rln-relay-eth-contract-address=0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3 \ +--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \ --rln-relay-cred-path=/keystore/keystore.json \ --rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \ --rln-relay-user-message-limit=20 \ diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim index 41678f590..5faa50cf6 100644 --- a/waku/factory/networks_config.nim +++ b/waku/factory/networks_config.nim @@ -22,7 +22,7 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf = maxMessageSize: "150KiB", clusterId: 1, rlnRelay: true, - rlnRelayEthContractAddress: "0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3", + rlnRelayEthContractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6", rlnRelayDynamic: true, rlnRelayChainId: 11155111, rlnRelayBandwidthThreshold: 0, From afa0bfbd3702fd2627664f7169e540696ff2bab8 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Sun, 30 Mar 2025 13:39:24 +0200 Subject: [PATCH 02/48] CHANGELOG v0.35.1 --- CHANGELOG.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec377ef5b..cdf87e076 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +## v0.35.1 (2025-03-30) + +### Bug fixes + +* Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + ## v0.35.0 (2025-03-03) ### Notes From 9a14446e32689936ab2e23f446da9cd5cdfd8c08 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Mon, 31 Mar 2025 09:18:14 +0200 Subject: [PATCH 03/48] setting correct contract address recommeded by Tanya --- apps/sonda/docker-compose.yml | 2 +- apps/sonda/register_rln.sh | 2 +- waku/factory/networks_config.nim | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/sonda/docker-compose.yml b/apps/sonda/docker-compose.yml index d6594428e..c6235ef32 100644 --- a/apps/sonda/docker-compose.yml +++ b/apps/sonda/docker-compose.yml @@ -9,7 +9,7 @@ x-logging: &logging x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-" x-rln-environment: &rln_env - RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6} + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8} RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" diff --git a/apps/sonda/register_rln.sh b/apps/sonda/register_rln.sh index 4fb373b3a..aca1007a8 100755 --- a/apps/sonda/register_rln.sh +++ b/apps/sonda/register_rln.sh @@ -24,7 +24,7 @@ fi docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \ --rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \ --rln-relay-eth-private-key=${ETH_TESTNET_KEY} \ ---rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \ +--rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \ --rln-relay-cred-path=/keystore/keystore.json \ --rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \ --rln-relay-user-message-limit=20 \ diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim index 5faa50cf6..f0fd97a0e 100644 --- a/waku/factory/networks_config.nim +++ b/waku/factory/networks_config.nim @@ -22,7 +22,7 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf = maxMessageSize: "150KiB", clusterId: 1, rlnRelay: true, - rlnRelayEthContractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6", + rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8", rlnRelayDynamic: true, rlnRelayChainId: 11155111, rlnRelayBandwidthThreshold: 0, From 00808c949572d94bb270cfa6cdcdef994320a96b Mon Sep 17 00:00:00 2001 From: fryorcraken <110212804+fryorcraken@users.noreply.github.com> Date: Thu, 3 Apr 2025 21:11:18 +1100 Subject: [PATCH 04/48] chore!: remove pubsub topics arguments (#3350) Use `--shards` instead. 
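A minimal before/after sketch of the flag migration (the cluster and shard values are taken from
the error message in the removed code; the `wakunode2` binary name is an assumption):

    # before (removed): subscribe by full pubsub topic name
    wakunode2 --cluster-id=22 --pubsub-topic=/waku/2/rs/22/1

    # after: pass the shard index directly (argument may be repeated)
    wakunode2 --cluster-id=22 --shards=1
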
--- waku/factory/external_config.nim | 7 ------- waku/factory/waku.nim | 25 ------------------------- 2 files changed, 32 deletions(-) diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index 100d1b644..41fc25582 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -344,13 +344,6 @@ hence would have reachability issues.""", name: "num-shards-in-network" .}: uint32 - pubsubTopics* {. - desc: - "Deprecated. Default pubsub topic to subscribe to. Argument may be repeated.", - defaultValue: @[], - name: "pubsub-topic" - .}: seq[string] - shards* {. desc: "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 854df8dde..91f3cee2e 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -177,31 +177,6 @@ proc new*( logging.setupLog(confCopy.logLevel, confCopy.logFormat) - # TODO: remove after pubsubtopic config gets removed - var shards = newSeq[uint16]() - if confCopy.pubsubTopics.len > 0: - let shardsRes = topicsToRelayShards(confCopy.pubsubTopics) - if shardsRes.isErr(): - error "failed to parse pubsub topic, please format according to static shard specification", - error = shardsRes.error - return err("failed to parse pubsub topic: " & $shardsRes.error) - - let shardsOpt = shardsRes.get() - - if shardsOpt.isSome(): - let relayShards = shardsOpt.get() - if relayShards.clusterId != confCopy.clusterId: - error "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22", - nodeCluster = confCopy.clusterId, pubsubCluster = relayShards.clusterId - return err( - "clusterId of the pubsub topic should match the node's cluster. e.g. --pubsub-topic=/waku/2/rs/22/1 and --cluster-id=22" - ) - - for shard in relayShards.shardIds: - shards.add(shard) - confCopy.shards = shards - - # Why can't I replace this block with a concise `.valueOr`? 
confCopy = block: let res = applyPresetConfiguration(confCopy) if res.isErr(): From 8b443edd98a3a0e75eac7c4e323a1cdfec4e6a6b Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Thu, 3 Apr 2025 14:27:27 +0300 Subject: [PATCH 05/48] feat: add waku_relay_get_connected_peers to libwaku (#3353) --- library/libwaku.h | 5 +++ library/libwaku.nim | 21 +++++++++++++ .../requests/protocols/relay_request.nim | 14 +++++++-- waku/waku_relay/protocol.nim | 31 ++++++++++++++----- 4 files changed, 60 insertions(+), 11 deletions(-) diff --git a/library/libwaku.h b/library/libwaku.h index bd9b6bfed..bb6fa20cf 100644 --- a/library/libwaku.h +++ b/library/libwaku.h @@ -117,6 +117,11 @@ int waku_relay_get_num_connected_peers(void* ctx, WakuCallBack callback, void* userData); +int waku_relay_get_connected_peers(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + int waku_relay_get_num_peers_in_mesh(void* ctx, const char* pubSubTopic, WakuCallBack callback, diff --git a/library/libwaku.nim b/library/libwaku.nim index 258ac27b2..d1ea0d082 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -429,6 +429,27 @@ proc waku_relay_get_num_connected_peers( initializeLibrary() checkLibwakuParams(ctx, callback, userData) + let pst = pubSubTopic.alloc() + defer: + deallocShared(pst) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pst), + callback, + userData, + ) + +proc waku_relay_get_connected_peers( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + let pst = pubSubTopic.alloc() defer: deallocShared(pst) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 232630591..3b5059972 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -1,4 +1,4 @@ -import std/net +import std/[net, sequtils, strutils] import chronicles, chronos, stew/byteutils, results import ../../../../../waku/waku_core/message/message, @@ -8,12 +8,14 @@ import ../../../../../waku/waku_core/time, # Timestamp ../../../../../waku/waku_core/topics/pubsub_topic, ../../../../../waku/waku_relay/protocol, + ../../../../../waku/node/peer_manager, ../../../../alloc type RelayMsgType* = enum SUBSCRIBE UNSUBSCRIBE PUBLISH + NUM_CONNECTED_PEERS LIST_CONNECTED_PEERS ## to return the list of all connected peers to an specific pubsub topic LIST_MESH_PEERS @@ -122,11 +124,17 @@ proc process*( let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex return ok(msgHash) - of LIST_CONNECTED_PEERS: + of NUM_CONNECTED_PEERS: let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr: - error "LIST_CONNECTED_PEERS failed", error = error + error "NUM_CONNECTED_PEERS failed", error = error return err($error) return ok($numConnPeers) + of LIST_CONNECTED_PEERS: + let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr: + error "LIST_CONNECTED_PEERS failed", error = error + return err($error) + ## returns a comma-separated string of peerIDs + return ok(connPeers.mapIt($it).join(",")) of LIST_MESH_PEERS: let numPeersInMesh = 
waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr: error "LIST_MESH_PEERS failed", error = error diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 0222db0d1..1af5f0d4f 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -539,22 +539,23 @@ proc publish*( return ok(relayedPeerCount) -proc getNumConnectedPeers*( +proc getConnectedPeers*( w: WakuRelay, pubsubTopic: PubsubTopic -): Result[int, string] = - ## Returns the number of connected peers and subscribed to the passed pubsub topic. +): Result[seq[PeerId], string] = + ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic. ## The 'gossipsub' atribute is defined in the GossipSub ref object. if pubsubTopic == "": ## Return all the connected peers - var numConnPeers = 0 + var peerIds = newSeq[PeerId]() for k, v in w.gossipsub: - numConnPeers.inc(v.len) - return ok(numConnPeers) + peerIds.add(toSeq(v).mapIt(it.peerId)) + # alternatively: peerIds &= toSeq(v).mapIt(it.peerId) + return ok(peerIds) if not w.gossipsub.hasKey(pubsubTopic): return err( - "getNumConnectedPeers - there is no gossipsub peer for the given pubsub topic: " & + "getConnectedPeers - there is no gossipsub peer for the given pubsub topic: " & pubsubTopic ) @@ -562,8 +563,22 @@ proc getNumConnectedPeers*( w.gossipsub[pubsubTopic] let peers: HashSet[PubSubPeer] = peersRes.valueOr: + return + err("getConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg) + + let peerIds = toSeq(peers).mapIt(it.peerId) + return ok(peerIds) + +proc getNumConnectedPeers*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[int, string] = + ## Returns the number of connected peers and subscribed to the passed pubsub topic. + + ## Return all the connected peers + let peers = w.getConnectedPeers(pubsubTopic).valueOr: return err( - "getNumConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg + "getNumConnectedPeers - failed retrieving peers in mesh: " & pubsubTopic & ": " & + error ) return ok(peers.len) From 6d3c758540a77a2558c7debaeb9da47d7e59d8ab Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Thu, 3 Apr 2025 15:13:10 +0300 Subject: [PATCH 06/48] feat: waku_relay_get_peers_in_mesh to libwaku (#3352) --- library/libwaku.h | 5 ++++ library/libwaku.nim | 21 +++++++++++++++ .../requests/protocols/relay_request.nim | 11 ++++++-- waku/waku_relay/protocol.nim | 26 ++++++++++++++----- 4 files changed, 55 insertions(+), 8 deletions(-) diff --git a/library/libwaku.h b/library/libwaku.h index bb6fa20cf..d49a40076 100644 --- a/library/libwaku.h +++ b/library/libwaku.h @@ -127,6 +127,11 @@ int waku_relay_get_num_peers_in_mesh(void* ctx, WakuCallBack callback, void* userData); +int waku_relay_get_peers_in_mesh(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + int waku_store_query(void* ctx, const char* jsonQuery, const char* peerAddr, diff --git a/library/libwaku.nim b/library/libwaku.nim index d1ea0d082..f7c14c061 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -471,6 +471,27 @@ proc waku_relay_get_num_peers_in_mesh( initializeLibrary() checkLibwakuParams(ctx, callback, userData) + let pst = pubSubTopic.alloc() + defer: + deallocShared(pst) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pst), + callback, + userData, + ) + +proc waku_relay_get_peers_in_mesh( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: 
WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + let pst = pubSubTopic.alloc() defer: deallocShared(pst) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 3b5059972..97f01488a 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -18,6 +18,7 @@ type RelayMsgType* = enum NUM_CONNECTED_PEERS LIST_CONNECTED_PEERS ## to return the list of all connected peers to an specific pubsub topic + NUM_MESH_PEERS LIST_MESH_PEERS ## to return the list of only the peers that conform the mesh for a particular pubsub topic ADD_PROTECTED_SHARD ## Protects a shard with a public key @@ -135,11 +136,17 @@ proc process*( return err($error) ## returns a comma-separated string of peerIDs return ok(connPeers.mapIt($it).join(",")) - of LIST_MESH_PEERS: + of NUM_MESH_PEERS: let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr: - error "LIST_MESH_PEERS failed", error = error + error "NUM_MESH_PEERS failed", error = error return err($error) return ok($numPeersInMesh) + of LIST_MESH_PEERS: + let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr: + error "LIST_MESH_PEERS failed", error = error + return err($error) + ## returns a comma-separated string of peerIDs + return ok(meshPeers.mapIt($it).join(",")) of ADD_PROTECTED_SHARD: try: let relayShard = diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 1af5f0d4f..126ff608c 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -323,21 +323,35 @@ proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} = proc getDHigh*(T: type WakuRelay): int = return GossipsubParameters.dHigh -proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] = - ## Returns the number of peers in a mesh defined by the passed pubsub topic. +proc getPeersInMesh*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[seq[PeerId], string] = + ## Returns the list of peerIds in a mesh defined by the passed pubsub topic. ## The 'mesh' atribute is defined in the GossipSub ref object. if not w.mesh.hasKey(pubsubTopic): - debug "getNumPeersInMesh - there is no mesh peer for the given pubsub topic", + debug "getPeersInMesh - there is no mesh peer for the given pubsub topic", pubsubTopic = pubsubTopic - return ok(0) + return ok(newSeq[PeerId]()) let peersRes = catch: w.mesh[pubsubTopic] let peers: HashSet[PubSubPeer] = peersRes.valueOr: - return - err("getNumPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg) + return err("getPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg) + + let peerIds = toSeq(peers).mapIt(it.peerId) + + return ok(peerIds) + +proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] = + ## Returns the number of peers in a mesh defined by the passed pubsub topic. 
+ + let peers = w.getPeersInMesh(pubsubTopic).valueOr: + return err( + "getNumPeersInMesh - failed retrieving peers in mesh: " & pubsubTopic & ": " & + error + ) return ok(peers.len) From 93698a0a88fd7354fd6cf13cf56fd84155565b04 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Fri, 4 Apr 2025 11:52:33 +0300 Subject: [PATCH 07/48] feat: add waku_get_connected_peers_info to libwaku (#3356) --- library/libwaku.h | 4 +++ library/libwaku.nim | 14 +++++++++++ .../requests/peer_manager_request.nim | 25 ++++++++++++++++++- 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/library/libwaku.h b/library/libwaku.h index d49a40076..3c15b36f9 100644 --- a/library/libwaku.h +++ b/library/libwaku.h @@ -168,6 +168,10 @@ int waku_get_peerids_from_peerstore(void* ctx, WakuCallBack callback, void* userData); +int waku_get_connected_peers_info(void* ctx, + WakuCallBack callback, + void* userData); + int waku_get_peerids_by_protocol(void* ctx, const char* protocol, WakuCallBack callback, diff --git a/library/libwaku.nim b/library/libwaku.nim index f7c14c061..ebe730da8 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -692,6 +692,20 @@ proc waku_get_peerids_from_peerstore( userData, ) +proc waku_get_connected_peers_info( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO), + callback, + userData, + ) + proc waku_get_connected_peers( ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer ): cint {.dynlib, exportc.} = diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim index 73b5a320d..d8a0a57af 100644 --- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim @@ -1,5 +1,5 @@ import std/[sequtils, strutils] -import chronicles, chronos, results, options +import chronicles, chronos, results, options, json import ../../../../waku/factory/waku, ../../../../waku/node/waku_node, @@ -9,6 +9,7 @@ import type PeerManagementMsgType* {.pure.} = enum CONNECT_TO GET_ALL_PEER_IDS + GET_CONNECTED_PEERS_INFO GET_PEER_IDS_BY_PROTOCOL DISCONNECT_PEER_BY_ID DIAL_PEER @@ -22,6 +23,10 @@ type PeerManagementRequest* = object protocol: cstring peerId: cstring +type PeerInfo = object + protocols: seq[string] + addresses: seq[string] + proc createShared*( T: type PeerManagementRequest, op: PeerManagementMsgType, @@ -83,6 +88,24 @@ proc process*( let peerIDs = waku.node.peerManager.wakuPeerStore.peers().mapIt($it.peerId).join(",") return ok(peerIDs) + of GET_CONNECTED_PEERS_INFO: + ## returns a JSON string mapping peerIDs to objects with protocols and addresses + + var peersMap = initTable[string, PeerInfo]() + let peers = waku.node.peerManager.wakuPeerStore.peers().filterIt( + it.connectedness == Connected + ) + + # Build a map of peer IDs to peer info objects + for peer in peers: + let peerIdStr = $peer.peerId + peersMap[peerIdStr] = + PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it)) + + # Convert the map to JSON string + let jsonObj = %*peersMap + let jsonStr = $jsonObj + return ok(jsonStr) of GET_PEER_IDS_BY_PROTOCOL: ## returns a comma-separated string 
of peerIDs that mount the given protocol let connectedPeers = waku.node.peerManager.wakuPeerStore From 15a8779842381e68c1623425e93090b62f906def Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 4 Apr 2025 11:45:29 +0200 Subject: [PATCH 08/48] inform in changelog that rln_tree needs to be removed --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cdf87e076..4f3715a75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,9 @@ * Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b)) +**Info:** before upgrading to this node, make sure you delete the previous rln_tree folder, i.e., +the one that is passed through this CLI: `--rln-relay-tree-path`. + This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): | Protocol | Spec status | Protocol id | | ---: | :---: | :--- | From 947f6364d12bf4a2c1f1c0b7162fc1cb51559610 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 4 Apr 2025 12:01:19 +0200 Subject: [PATCH 09/48] node -> version in a comment within changelog.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f3715a75..151392f1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ * Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b)) -**Info:** before upgrading to this node, make sure you delete the previous rln_tree folder, i.e., +**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e., the one that is passed through this CLI: `--rln-relay-tree-path`. 
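A minimal sketch of that cleanup step, assuming the node was started with
`--rln-relay-tree-path=/path/to/rln_tree` (the path is a placeholder):

    # stop the node first, then delete the old RLN membership tree before upgrading
    rm -rf /path/to/rln_tree
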
This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): From b1344bb3b1ef4108f83cd2659c301003841ad0c2 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 4 Apr 2025 19:19:38 +0200 Subject: [PATCH 10/48] chore: better keystore management (#3358) --- waku/waku_keystore/keystore.nim | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/waku/waku_keystore/keystore.nim b/waku/waku_keystore/keystore.nim index 9741761ff..6cc4ef701 100644 --- a/waku/waku_keystore/keystore.nim +++ b/waku/waku_keystore/keystore.nim @@ -61,7 +61,9 @@ proc loadAppKeystore*( return err( AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for reading") ) - let fileContents = readAll(f) + + ## the next blocks expect the whole keystore.json content to be compacted in one single line + let fileContents = readAll(f).replace(" ", "").replace("\n", "") # We iterate over each substring split by separator (which we expect to correspond to a single keystore json) for keystore in fileContents.split(separator): @@ -159,8 +161,7 @@ proc loadAppKeystore*( return err( AppKeystoreError( - kind: KeystoreKeystoreDoesNotExist, - msg: "No keystore found for the passed parameters", + kind: KeystoreKeystoreDoesNotExist, msg: "The keystore file could not be parsed" ) ) From 75b8838fbfae66f7a9e4dcd56fc9ea3b6668c188 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Mon, 7 Apr 2025 12:24:03 +0200 Subject: [PATCH 11/48] chore: retrieve protocols in new added peer from discv5 (#3354) * add new unit test to validate that any peer can be retrieved * add new discv5 test and better peer store management * wakuPeerStore -> switch.peerStore * simplify waku_peer_store, better logs and peer_manager enhancements --- .../diagnose_connections.nim | 10 +- .../service_peer_management.nim | 6 +- apps/wakucanary/wakucanary.nim | 2 +- examples/publisher.nim | 2 +- examples/subscriber.nim | 2 +- .../requests/peer_manager_request.nim | 6 +- tests/all_tests_waku.nim | 3 +- tests/node/peer_manager/peer_store/utils.nim | 3 - tests/node/test_wakunode_peer_exchange.nim | 26 +- tests/node/test_wakunode_peer_manager.nim | 34 +- tests/test_peer_manager.nim | 304 ++++++++++-------- tests/test_peer_store_extended.nim | 4 +- tests/test_waku_dnsdisc.nim | 12 +- tests/waku_discv5/test_all.nim | 1 - tests/waku_discv5/test_waku_discv5.nim | 84 ++++- waku/node/peer_manager/peer_manager.nim | 148 ++++----- waku/node/peer_manager/waku_peer_store.nim | 210 ++++++------ waku/node/waku_node.nim | 4 +- waku/waku_api/rest/admin/handlers.nim | 16 +- waku/waku_filter_v2/protocol.nim | 2 +- waku/waku_peer_exchange/protocol.nim | 2 +- 21 files changed, 485 insertions(+), 396 deletions(-) delete mode 100644 tests/waku_discv5/test_all.nim diff --git a/apps/liteprotocoltester/diagnose_connections.nim b/apps/liteprotocoltester/diagnose_connections.nim index 788f83c68..a4007d59c 100644 --- a/apps/liteprotocoltester/diagnose_connections.nim +++ b/apps/liteprotocoltester/diagnose_connections.nim @@ -42,7 +42,7 @@ proc `$`*(cap: Capabilities): string = proc allPeers(pm: PeerManager): string = var allStr: string = "" - for idx, peer in pm.wakuPeerStore.peers(): + for idx, peer in pm.switch.peerStore.peers(): allStr.add( " " & $idx & ". 
| " & constructMultiaddrStr(peer) & " | agent: " & peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " & @@ -51,10 +51,10 @@ proc allPeers(pm: PeerManager): string = return allStr proc logSelfPeers*(pm: PeerManager) = - let selfLighpushPeers = pm.wakuPeerStore.getPeersByProtocol(WakuLightPushCodec) - let selfRelayPeers = pm.wakuPeerStore.getPeersByProtocol(WakuRelayCodec) - let selfFilterPeers = pm.wakuPeerStore.getPeersByProtocol(WakuFilterSubscribeCodec) - let selfPxPeers = pm.wakuPeerStore.getPeersByProtocol(WakuPeerExchangeCodec) + let selfLighpushPeers = pm.switch.peerStore.getPeersByProtocol(WakuLightPushCodec) + let selfRelayPeers = pm.switch.peerStore.getPeersByProtocol(WakuRelayCodec) + let selfFilterPeers = pm.switch.peerStore.getPeersByProtocol(WakuFilterSubscribeCodec) + let selfPxPeers = pm.switch.peerStore.getPeersByProtocol(WakuPeerExchangeCodec) let printable = catch: """*------------------------------------------------------------------------------------------* diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim index 8fd6de973..83216ae3b 100644 --- a/apps/liteprotocoltester/service_peer_management.nim +++ b/apps/liteprotocoltester/service_peer_management.nim @@ -61,7 +61,7 @@ proc selectRandomCapablePeer*( elif codec.contains("filter"): cap = Capabilities.Filter - var supportivePeers = pm.wakuPeerStore.getPeersByCapability(cap) + var supportivePeers = pm.switch.peerStore.getPeersByCapability(cap) trace "Found supportive peers count", count = supportivePeers.len() trace "Found supportive peers", supportivePeers = $supportivePeers @@ -102,7 +102,7 @@ proc tryCallAllPxPeers*( elif codec.contains("filter"): capability = Capabilities.Filter - var supportivePeers = pm.wakuPeerStore.getPeersByCapability(capability) + var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability) lpt_px_peers.set(supportivePeers.len) debug "Found supportive peers count", count = supportivePeers.len() @@ -215,7 +215,7 @@ proc selectRandomServicePeer*( if actualPeer.isSome(): alreadyUsedServicePeers.add(actualPeer.get()) - let supportivePeers = pm.wakuPeerStore.getPeersByProtocol(codec).filterIt( + let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt( it notin alreadyUsedServicePeers ) if supportivePeers.len == 0: diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index 914d76e70..ea5220248 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -246,7 +246,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = return 1 let lp2pPeerStore = node.switch.peerStore - let conStatus = node.peerManager.wakuPeerStore[ConnectionBook][peer.peerId] + let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId] if conf.ping: discard await pingFut diff --git a/examples/publisher.nim b/examples/publisher.nim index 654f40601..5b1ca9f18 100644 --- a/examples/publisher.nim +++ b/examples/publisher.nim @@ -95,7 +95,7 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = # wait for a minimum of peers to be connected, otherwise messages wont be gossiped while true: - let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book + let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book .values() .countIt(it == Connected) if numConnectedPeers >= 6: diff --git a/examples/subscriber.nim b/examples/subscriber.nim index 0dd22f469..90440aabc 100644 --- a/examples/subscriber.nim +++ 
b/examples/subscriber.nim @@ -93,7 +93,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = # wait for a minimum of peers to be connected, otherwise messages wont be gossiped while true: - let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book + let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book .values() .countIt(it == Connected) if numConnectedPeers >= 6: diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim index d8a0a57af..1e5202891 100644 --- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim @@ -86,13 +86,13 @@ proc process*( of GET_ALL_PEER_IDS: ## returns a comma-separated string of peerIDs let peerIDs = - waku.node.peerManager.wakuPeerStore.peers().mapIt($it.peerId).join(",") + waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",") return ok(peerIDs) of GET_CONNECTED_PEERS_INFO: ## returns a JSON string mapping peerIDs to objects with protocols and addresses var peersMap = initTable[string, PeerInfo]() - let peers = waku.node.peerManager.wakuPeerStore.peers().filterIt( + let peers = waku.node.peerManager.switch.peerStore.peers().filterIt( it.connectedness == Connected ) @@ -108,7 +108,7 @@ proc process*( return ok(jsonStr) of GET_PEER_IDS_BY_PROTOCOL: ## returns a comma-separated string of peerIDs that mount the given protocol - let connectedPeers = waku.node.peerManager.wakuPeerStore + let connectedPeers = waku.node.peerManager.switch.peerStore .peers($self[].protocol) .filterIt(it.connectedness == Connected) .mapIt($it.peerId) diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 3e847ae86..f23f4249c 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -85,7 +85,8 @@ import ./test_waku_noise_sessions, ./test_waku_netconfig, ./test_waku_switch, - ./test_waku_rendezvous + ./test_waku_rendezvous, + ./waku_discv5/test_waku_discv5 # Waku Keystore test suite import ./test_waku_keystore_keyfile, ./test_waku_keystore diff --git a/tests/node/peer_manager/peer_store/utils.nim b/tests/node/peer_manager/peer_store/utils.nim index 1d5dc6e22..b087dc471 100644 --- a/tests/node/peer_manager/peer_store/utils.nim +++ b/tests/node/peer_manager/peer_store/utils.nim @@ -7,6 +7,3 @@ import proc newTestWakuPeerStorage*(path: Option[string] = string.none()): WakuPeerStorage = let db = newSqliteDatabase(path) WakuPeerStorage.new(db).value() - -proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool = - return peerStore[AddressBook].contains(peerId) diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim index edb262b0e..afd808a2c 100644 --- a/tests/node/test_wakunode_peer_exchange.nim +++ b/tests/node/test_wakunode_peer_exchange.nim @@ -83,7 +83,7 @@ suite "Waku Peer Exchange": # Then no peers are fetched check: - node.peerManager.wakuPeerStore.peers.len == 0 + node.peerManager.switch.peerStore.peers.len == 0 res.error.status_code == SERVICE_UNAVAILABLE res.error.status_desc == some("PeerExchange is not mounted") @@ -98,12 +98,12 @@ suite "Waku Peer Exchange": res.error.status_desc == some("peer_not_found_failure") # Then no peers are fetched - check node.peerManager.wakuPeerStore.peers.len == 0 + check node.peerManager.switch.peerStore.peers.len == 0 asyncTest "Node succesfully exchanges px peers 
with faked discv5": # Given both nodes mount peer exchange await allFutures([node.mountPeerExchange(), node2.mountPeerExchange()]) - check node.peerManager.wakuPeerStore.peers.len == 0 + check node.peerManager.switch.peerStore.peers.len == 0 # Mock that we discovered a node (to avoid running discv5) var enr = enr.Record() @@ -124,8 +124,8 @@ suite "Waku Peer Exchange": # Check that the peer ended up in the peerstore let rpInfo = enr.toRemotePeerInfo.get() check: - node.peerManager.wakuPeerStore.peers.anyIt(it.peerId == rpInfo.peerId) - node.peerManager.wakuPeerStore.peers.anyIt(it.addrs == rpInfo.addrs) + node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId) + node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs) suite "setPeerExchangePeer": var node2 {.threadvar.}: WakuNode @@ -142,7 +142,7 @@ suite "Waku Peer Exchange": asyncTest "peer set successfully": # Given a node with peer exchange mounted await node.mountPeerExchange() - let initialPeers = node.peerManager.wakuPeerStore.peers.len + let initialPeers = node.peerManager.switch.peerStore.peers.len # And a valid peer info let remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() @@ -152,12 +152,12 @@ suite "Waku Peer Exchange": # Then the peer is added to the peer store check: - node.peerManager.wakuPeerStore.peers.len == (initialPeers + 1) + node.peerManager.switch.peerStore.peers.len == (initialPeers + 1) asyncTest "peer exchange not mounted": # Given a node without peer exchange mounted check node.wakuPeerExchange == nil - let initialPeers = node.peerManager.wakuPeerStore.peers.len + let initialPeers = node.peerManager.switch.peerStore.peers.len # And a valid peer info let invalidMultiAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() @@ -167,12 +167,12 @@ suite "Waku Peer Exchange": # Then no peer is added to the peer store check: - node.peerManager.wakuPeerStore.peers.len == initialPeers + node.peerManager.switch.peerStore.peers.len == initialPeers asyncTest "peer info parse error": # Given a node with peer exchange mounted await node.mountPeerExchange() - let initialPeers = node.peerManager.wakuPeerStore.peers.len + let initialPeers = node.peerManager.switch.peerStore.peers.len # And given a peer info with an invalid peer id var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() @@ -183,7 +183,7 @@ suite "Waku Peer Exchange": # Then no peer is added to the peer store check: - node.peerManager.wakuPeerStore.peers.len == initialPeers + node.peerManager.switch.peerStore.peers.len == initialPeers suite "Waku Peer Exchange with discv5": asyncTest "Node successfully exchanges px peers with real discv5": @@ -286,13 +286,13 @@ suite "Waku Peer Exchange with discv5": let requestPeers = 1 - currentPeers = node3.peerManager.wakuPeerStore.peers.len + currentPeers = node3.peerManager.switch.peerStore.peers.len let res = await node3.fetchPeerExchangePeers(1) check res.tryGet() == 1 # Then node3 has received 1 peer from node1 check: - node3.peerManager.wakuPeerStore.peers.len == currentPeers + requestPeers + node3.peerManager.switch.peerStore.peers.len == currentPeers + requestPeers await allFutures( [node1.stop(), node2.stop(), node3.stop(), disc1.stop(), disc2.stop()] diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim index 0fd80271b..6b8fb2fa6 100644 --- a/tests/node/test_wakunode_peer_manager.nim +++ b/tests/node/test_wakunode_peer_manager.nim @@ -45,9 +45,9 @@ suite "Peer Manager": var server {.threadvar.}: WakuNode - serverPeerStore {.threadvar.}: 
WakuPeerStore + serverPeerStore {.threadvar.}: PeerStore client {.threadvar.}: WakuNode - clientPeerStore {.threadvar.}: WakuPeerStore + clientPeerStore {.threadvar.}: PeerStore var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo @@ -64,9 +64,9 @@ suite "Peer Manager": clientKey = generateSecp256k1Key() server = newTestWakuNode(serverKey, listenIp, Port(3000)) - serverPeerStore = server.peerManager.wakuPeerStore + serverPeerStore = server.peerManager.switch.peerStore client = newTestWakuNode(clientKey, listenIp, Port(3001)) - clientPeerStore = client.peerManager.wakuPeerStore + clientPeerStore = client.peerManager.switch.peerStore await allFutures(server.start(), client.start()) @@ -140,7 +140,7 @@ suite "Peer Manager": clientPeerStore.peers().len == 1 # Given the server is marked as CannotConnect - client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] = + client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = CannotConnect # When pruning the client's store @@ -177,7 +177,7 @@ suite "Peer Manager": clientPeerStore.peers().len == 1 # Given the server is marked as having 1 failed connection - client.peerManager.wakuPeerStore[NumberFailedConnBook].book[serverPeerId] = 1 + client.peerManager.switch.peerStore[NumberFailedConnBook].book[serverPeerId] = 1 # When pruning the client's store client.peerManager.prunePeerStore() @@ -196,7 +196,7 @@ suite "Peer Manager": clientPeerStore.peers().len == 1 # Given the server is marked as not connected - client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] = + client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = CannotConnect # When pruning the client's store @@ -220,7 +220,7 @@ suite "Peer Manager": # Given the server is marked as not connected # (There's only one shard in the ENR so avg shards will be the same as the shard count; hence it will be purged.) 
- client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] = + client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = CannotConnect # When pruning the client's store @@ -714,8 +714,8 @@ suite "Persistence Check": client = newTestWakuNode( clientKey, listenIp, listenPort, peerStorage = clientPeerStorage ) - serverPeerStore = server.peerManager.wakuPeerStore - clientPeerStore = client.peerManager.wakuPeerStore + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore await allFutures(server.start(), client.start()) @@ -731,7 +731,7 @@ suite "Persistence Check": newClient = newTestWakuNode( clientKey, listenIp, listenPort, peerStorage = newClientPeerStorage ) - newClientPeerStore = newClient.peerManager.wakuPeerStore + newClientPeerStore = newClient.peerManager.switch.peerStore await newClient.start() @@ -756,8 +756,8 @@ suite "Persistence Check": client = newTestWakuNode( clientKey, listenIp, listenPort, peerStorage = clientPeerStorage ) - serverPeerStore = server.peerManager.wakuPeerStore - clientPeerStore = client.peerManager.wakuPeerStore + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore await allFutures(server.start(), client.start()) @@ -776,8 +776,8 @@ suite "Persistence Check": clientKey = generateSecp256k1Key() server = newTestWakuNode(serverKey, listenIp, listenPort) client = newTestWakuNode(clientKey, listenIp, listenPort) - serverPeerStore = server.peerManager.wakuPeerStore - clientPeerStore = client.peerManager.wakuPeerStore + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore await allFutures(server.start(), client.start()) @@ -792,13 +792,13 @@ suite "Mount Order": var client {.threadvar.}: WakuNode clientRemotePeerInfo {.threadvar.}: RemotePeerInfo - clientPeerStore {.threadvar.}: WakuPeerStore + clientPeerStore {.threadvar.}: PeerStore asyncSetup: let clientKey = generateSecp256k1Key() client = newTestWakuNode(clientKey, listenIp, listenPort) - clientPeerStore = client.peerManager.wakuPeerStore + clientPeerStore = client.peerManager.switch.peerStore await client.start() diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index 4fd148b81..4ca08e46f 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -50,10 +50,10 @@ procSuite "Peer Manager": check: connOk == true - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[1].peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected asyncTest "dialPeer() works": @@ -80,13 +80,13 @@ procSuite "Peer Manager": # Check that node2 is being managed in node1 check: - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[1].peerInfo.peerId ) # Check connectedness check: - nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connectedness.Connected await allFutures(nodes.mapIt(it.stop())) @@ -141,12 +141,12 @@ procSuite "Peer Manager": # Check peers were successfully added to peer manager check: - node.peerManager.wakuPeerStore.peers().len == 2 - 
node.peerManager.wakuPeerStore.peers(WakuFilterSubscribeCodec).allIt( + node.peerManager.switch.peerStore.peers().len == 2 + node.peerManager.switch.peerStore.peers(WakuFilterSubscribeCodec).allIt( it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and it.protocols.contains(WakuFilterSubscribeCodec) ) - node.peerManager.wakuPeerStore.peers(WakuStoreCodec).allIt( + node.peerManager.switch.peerStore.peers(WakuStoreCodec).allIt( it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and it.protocols.contains(WakuStoreCodec) ) @@ -166,7 +166,7 @@ procSuite "Peer Manager": nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo()) check: # No information about node2's connectedness - nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == NotConnected # Failed connection @@ -183,7 +183,7 @@ procSuite "Peer Manager": check: # Cannot connect to node2 - nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect # Successful connection @@ -194,14 +194,14 @@ procSuite "Peer Manager": check: # Currently connected to node2 - nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == Connected # Stop node. Gracefully disconnect from all peers. await nodes[0].stop() check: # Not currently connected to node2, but had recent, successful connection. - nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == CanConnect await nodes[1].stop() @@ -232,12 +232,13 @@ procSuite "Peer Manager": let conn1Ok = await nodes[0].peerManager.connectPeer(nonExistentPeer) check: # Cannot connect to node2 - nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) == + nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) == CannotConnect - nodes[0].peerManager.wakuPeerStore[ConnectionBook][nonExistentPeer.peerId] == + nodes[0].peerManager.switch.peerStore[ConnectionBook][nonExistentPeer.peerId] == CannotConnect - nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nonExistentPeer.peerId] == - 1 + nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nonExistentPeer.peerId + ] == 1 # Connection attempt failed conn1Ok == false @@ -253,14 +254,17 @@ procSuite "Peer Manager": nodes[0].peerManager.canBeConnected(nodes[1].peerInfo.peerId) == true # After a successful connection, the number of failed connections is reset - nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] = - 4 + + nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nodes[1].peerInfo.peerId + ] = 4 let conn2Ok = await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo()) check: conn2Ok == true - nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] == - 0 + nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nodes[1].peerInfo.peerId + ] == 0 await allFutures(nodes.mapIt(it.stop())) @@ -290,7 +294,7 @@ procSuite "Peer Manager": assert is12Connected == true, "Node 1 and 2 not connected" check: - node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] == + node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs # wait for 
the peer store update @@ -298,9 +302,9 @@ procSuite "Peer Manager": check: # Currently connected to node2 - node1.peerManager.wakuPeerStore.peers().len == 1 - node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( @@ -316,9 +320,9 @@ procSuite "Peer Manager": check: # Node2 has been loaded after "restart", but we have not yet reconnected - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected await node3.mountRelay() @@ -328,9 +332,9 @@ procSuite "Peer Manager": check: # Reconnected to node2 after "restart" - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected await allFutures([node1.stop(), node2.stop(), node3.stop()]) @@ -360,7 +364,7 @@ procSuite "Peer Manager": assert is12Connected == true, "Node 1 and 2 not connected" check: - node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] == + node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] == remotePeerInfo2.addrs # wait for the peer store update @@ -368,9 +372,9 @@ procSuite "Peer Manager": check: # Currently connected to node2 - node1.peerManager.wakuPeerStore.peers().len == 1 - node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( @@ -386,9 +390,9 @@ procSuite "Peer Manager": check: # Node2 has been loaded after "restart", but we have not yet reconnected - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected await node3.mountRelay() @@ -398,9 +402,9 @@ procSuite "Peer Manager": check: # Reconnected to node2 after "restart" - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - 
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected await allFutures([node1.stop(), node2.stop(), node3.stop()]) @@ -488,12 +492,12 @@ procSuite "Peer Manager": (await node1.peerManager.connectPeer(peerInfo2.toRemotePeerInfo())) == true check: # Currently connected to node2 - node1.peerManager.wakuPeerStore.peers().len == 1 - node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node1.peerManager.wakuPeerStore.peers().anyIt( + node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.peers().anyIt( it.protocols.contains(node2.wakuRelay.codec) ) - node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( @@ -510,20 +514,22 @@ procSuite "Peer Manager": node2.wakuRelay.codec == betaCodec node3.wakuRelay.codec == stableCodec # Node2 has been loaded after "restart", but we have not yet reconnected - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec)) - node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec)) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected await node3.start() # This should trigger a reconnect check: # Reconnected to node2 after "restart" - node3.peerManager.wakuPeerStore.peers().len == 1 - node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId) - node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec)) - node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(stableCodec)) - node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec)) + node3.peerManager.switch.peerStore.peers().anyIt( + it.protocols.contains(stableCodec) + ) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected await allFutures([node1.stop(), node2.stop(), node3.stop()]) @@ -560,38 +566,38 @@ procSuite "Peer Manager": check: # Peerstore track all three peers - nodes[0].peerManager.wakuPeerStore.peers().len == 3 + nodes[0].peerManager.switch.peerStore.peers().len == 3 # All peer ids are correct - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[1].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[2].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + 
nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[3].switch.peerInfo.peerId ) # All peers support the relay protocol - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( WakuRelayCodec ) # All peers are connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[1].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[2].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[3].switch.peerInfo.peerId ] == Connected @@ -630,38 +636,38 @@ procSuite "Peer Manager": check: # Peerstore track all three peers - nodes[0].peerManager.wakuPeerStore.peers().len == 3 + nodes[0].peerManager.switch.peerStore.peers().len == 3 # All peer ids are correct - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[1].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[2].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[3].switch.peerInfo.peerId ) # All peers support the relay protocol - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( WakuRelayCodec ) # All peers are connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[1].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[2].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[3].switch.peerInfo.peerId ] == Connected @@ -690,66 +696,72 @@ procSuite "Peer Manager": check: # Peerstore track all three peers - nodes[0].peerManager.wakuPeerStore.peers().len == 3 + nodes[0].peerManager.switch.peerStore.peers().len == 3 # Inbound/Outbound number of peers match - nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 3 - nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 0 - nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0 - 
nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1 - nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0 - nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1 - nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0 - nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1 + nodes[0].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 3 + nodes[0].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 0 + nodes[1].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[1].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 + nodes[2].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[2].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 + nodes[3].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[3].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 # All peer ids are correct - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[1].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[2].switch.peerInfo.peerId ) - nodes[0].peerManager.wakuPeerStore.peers().anyIt( + nodes[0].peerManager.switch.peerStore.peers().anyIt( it.peerId == nodes[3].switch.peerInfo.peerId ) # All peers support the relay protocol - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( WakuRelayCodec ) - nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( WakuRelayCodec ) # All peers are connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[1].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[2].switch.peerInfo.peerId ] == Connected - nodes[0].peerManager.wakuPeerStore[ConnectionBook][ + nodes[0].peerManager.switch.peerStore[ConnectionBook][ nodes[3].switch.peerInfo.peerId ] == Connected # All peers are Inbound in peer 0 - nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] == - Inbound - nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] == - Inbound - nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] == - Inbound + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[1].switch.peerInfo.peerId + ] == Inbound + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[2].switch.peerInfo.peerId + ] == Inbound + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[3].switch.peerInfo.peerId + ] == Inbound # All peers have an Outbound connection with peer 0 - nodes[1].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == - Outbound - nodes[2].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == - Outbound - 
nodes[3].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] == - Outbound + nodes[1].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound + nodes[2].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound + nodes[3].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound await allFutures(nodes.mapIt(it.stop())) @@ -778,12 +790,13 @@ procSuite "Peer Manager": # all peers are stored in the peerstore check: - node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[0].peerId) - node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[1].peerId) - node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[2].peerId) + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[0].peerId) + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[1].peerId) + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[2].peerId) # but the relay peer is not - node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[3].peerId) == false + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[3].peerId) == + false # all service peers are added to its service slot check: @@ -900,8 +913,8 @@ procSuite "Peer Manager": peers.len == 3 # Add a peer[0] to the peerstore - pm.wakuPeerStore[AddressBook][peers[0].peerId] = peers[0].addrs - pm.wakuPeerStore[ProtoBook][peers[0].peerId] = + pm.switch.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs + pm.switch.peerStore[ProtoBook][peers[0].peerId] = @[WakuRelayCodec, WakuStoreCodec, WakuFilterSubscribeCodec] # When no service peers, we get one from the peerstore @@ -979,44 +992,44 @@ procSuite "Peer Manager": # Check that we have 30 peers in the peerstore check: - pm.wakuPeerStore.peers.len == 30 + pm.switch.peerStore.peers.len == 30 # fake that some peers failed to connected - pm.wakuPeerStore[NumberFailedConnBook][peers[0].peerId] = 2 - pm.wakuPeerStore[NumberFailedConnBook][peers[1].peerId] = 2 - pm.wakuPeerStore[NumberFailedConnBook][peers[2].peerId] = 2 - pm.wakuPeerStore[NumberFailedConnBook][peers[3].peerId] = 2 - pm.wakuPeerStore[NumberFailedConnBook][peers[4].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[0].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[1].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[2].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[3].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[4].peerId] = 2 # fake that some peers are connected - pm.wakuPeerStore[ConnectionBook][peers[5].peerId] = Connected - pm.wakuPeerStore[ConnectionBook][peers[8].peerId] = Connected - pm.wakuPeerStore[ConnectionBook][peers[15].peerId] = Connected - pm.wakuPeerStore[ConnectionBook][peers[18].peerId] = Connected - pm.wakuPeerStore[ConnectionBook][peers[24].peerId] = Connected - pm.wakuPeerStore[ConnectionBook][peers[29].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[5].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[8].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[15].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[18].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[24].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[29].peerId] = Connected # Prune the peerstore (current=30, target=25) pm.prunePeerStore() check: # ensure peerstore was pruned - pm.wakuPeerStore.peers.len == 25 + 
pm.switch.peerStore.peers.len == 25 # ensure connected peers were not pruned - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[5].peerId) - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[8].peerId) - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[15].peerId) - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[18].peerId) - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[24].peerId) - pm.wakuPeerStore.peers.anyIt(it.peerId == peers[29].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[5].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[8].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[15].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[18].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[24].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[29].peerId) # ensure peers that failed were the first to be pruned - not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[0].peerId) - not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[1].peerId) - not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[2].peerId) - not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[3].peerId) - not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[4].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[0].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[1].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[2].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[3].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[4].peerId) asyncTest "canBeConnected() returns correct value": let pm = PeerManager.new( @@ -1042,8 +1055,8 @@ procSuite "Peer Manager": pm.canBeConnected(p1) == true # peer with ONE error that just failed - pm.wakuPeerStore[NumberFailedConnBook][p1] = 1 - pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) + pm.switch.peerStore[NumberFailedConnBook][p1] = 1 + pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) # we cant connect right now check: pm.canBeConnected(p1) == false @@ -1054,8 +1067,8 @@ procSuite "Peer Manager": pm.canBeConnected(p1) == true # peer with TWO errors, we can connect until 2 seconds have passed - pm.wakuPeerStore[NumberFailedConnBook][p1] = 2 - pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) + pm.switch.peerStore[NumberFailedConnBook][p1] = 2 + pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) # cant be connected after 1 second await sleepAsync(chronos.milliseconds(1000)) @@ -1152,6 +1165,23 @@ procSuite "Peer Manager": check: nodes[0].peerManager.ipTable["127.0.0.1"].len == 1 nodes[0].peerManager.switch.connManager.getConnections().len == 1 - nodes[0].peerManager.wakuPeerStore.peers().len == 1 + nodes[0].peerManager.switch.peerStore.peers().len == 1 await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Retrieve peer that mounted peer exchange": + let + node1 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55048)) + node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55023)) + + await allFutures(node1.start(), node2.start()) + await allFutures(node1.mountRelay(), node2.mountRelay()) + await allFutures(node1.mountPeerExchange(), node2.mountPeerExchange()) + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var r = node1.peerManager.selectPeer(WakuRelayCodec) + assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec" + + r = 
node1.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" diff --git a/tests/test_peer_store_extended.nim b/tests/test_peer_store_extended.nim index ef03fc69a..aa5947181 100644 --- a/tests/test_peer_store_extended.nim +++ b/tests/test_peer_store_extended.nim @@ -25,7 +25,7 @@ suite "Extended nim-libp2p Peer Store": setup: # Setup a nim-libp2p peerstore with some peers - let peerStore = WakuPeerStore.new(nil, capacity = 50) + let peerStore = PeerStore.new(nil, capacity = 50) var p1, p2, p3, p4, p5, p6: PeerId # create five peers basePeerId + [1-5] @@ -320,7 +320,7 @@ suite "Extended nim-libp2p Peer Store": test "del() successfully deletes waku custom books": # Given - let peerStore = WakuPeerStore.new(nil, capacity = 5) + let peerStore = PeerStore.new(nil, capacity = 5) var p1: PeerId require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW1") diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim index 228fa5542..cf0fd4007 100644 --- a/tests/test_waku_dnsdisc.nim +++ b/tests/test_waku_dnsdisc.nim @@ -94,20 +94,20 @@ suite "Waku DNS Discovery": check: # We have successfully connected to all discovered nodes - node4.peerManager.wakuPeerStore.peers().anyIt( + node4.peerManager.switch.peerStore.peers().anyIt( it.peerId == node1.switch.peerInfo.peerId ) - node4.peerManager.wakuPeerStore.connectedness(node1.switch.peerInfo.peerId) == + node4.peerManager.switch.peerStore.connectedness(node1.switch.peerInfo.peerId) == Connected - node4.peerManager.wakuPeerStore.peers().anyIt( + node4.peerManager.switch.peerStore.peers().anyIt( it.peerId == node2.switch.peerInfo.peerId ) - node4.peerManager.wakuPeerStore.connectedness(node2.switch.peerInfo.peerId) == + node4.peerManager.switch.peerStore.connectedness(node2.switch.peerInfo.peerId) == Connected - node4.peerManager.wakuPeerStore.peers().anyIt( + node4.peerManager.switch.peerStore.peers().anyIt( it.peerId == node3.switch.peerInfo.peerId ) - node4.peerManager.wakuPeerStore.connectedness(node3.switch.peerInfo.peerId) == + node4.peerManager.switch.peerStore.connectedness(node3.switch.peerInfo.peerId) == Connected await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()]) diff --git a/tests/waku_discv5/test_all.nim b/tests/waku_discv5/test_all.nim deleted file mode 100644 index a6d2c22c4..000000000 --- a/tests/waku_discv5/test_all.nim +++ /dev/null @@ -1 +0,0 @@ -import ./test_waku_discv5 diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index c4696d658..3d66136e8 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -8,13 +8,15 @@ import chronicles, testutils/unittests, libp2p/crypto/crypto as libp2p_keys, - eth/keys as eth_keys + eth/keys as eth_keys, + libp2p/crypto/secp, + libp2p/protocols/rendezvous import - waku/[waku_core/topics, waku_enr, discovery/waku_discv5, common/enr], + waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/capabilities], ../testlib/[wakucore, testasync, assertions, futures, wakunode], ../waku_enr/utils, - ./utils + ./utils as discv5_utils import eth/p2p/discoveryv5/enr as ethEnr @@ -53,7 +55,7 @@ suite "Waku Discovery v5": var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + builder.withWakuCapabilities(Capabilities.Relay) let recordRes = builder.build() require recordRes.isOk() @@ -73,7 +75,7 @@ suite "Waku 
Discovery v5": var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + builder.withWakuCapabilities(Capabilities.Relay) let recordRes = builder.build() require recordRes.isOk() @@ -93,7 +95,7 @@ suite "Waku Discovery v5": var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) require builder.withWakuRelaySharding(shardsTopics).isOk() - builder.withWakuCapabilities(Relay) + builder.withWakuCapabilities(Capabilities.Relay) let recordRes = builder.build() require recordRes.isOk() @@ -187,7 +189,7 @@ suite "Waku Discovery v5": indices = indices, flags = recordFlags, ) - node = newTestDiscv5( + node = discv5_utils.newTestDiscv5( privKey = privKey, bindIp = bindIp, tcpPort = tcpPort, @@ -342,7 +344,8 @@ suite "Waku Discovery v5": let res4 = await node4.start() assertResultOk res4 - await sleepAsync(FUTURE_TIMEOUT) + ## leave some time for discv5 to act + await sleepAsync(chronos.seconds(10)) ## When let peers = await node1.findRandomPeers() @@ -407,12 +410,69 @@ suite "Waku Discovery v5": enrs.len == 0 suite "waku discv5 initialization": + asyncTest "Start waku and check discv5 discovered peers": + let myRng = crypto.newRng() + var conf = defaultTestWakuNodeConf() + + conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + conf.discv5Discovery = true + conf.discv5UdpPort = Port(9000) + + let waku0 = Waku.new(conf).valueOr: + raiseAssert error + (waitFor startWaku(addr waku0)).isOkOr: + raiseAssert error + + conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + conf.discv5BootstrapNodes = @[waku0.node.enr.toURI()] + conf.discv5Discovery = true + conf.discv5UdpPort = Port(9001) + conf.tcpPort = Port(60001) + + let waku1 = Waku.new(conf).valueOr: + raiseAssert error + (waitFor startWaku(addr waku1)).isOkOr: + raiseAssert error + + await waku1.node.mountPeerExchange() + await waku1.node.mountRendezvous() + + var conf2 = conf + conf2.discv5BootstrapNodes = @[waku1.node.enr.toURI()] + conf2.discv5Discovery = true + conf2.tcpPort = Port(60003) + conf2.discv5UdpPort = Port(9003) + conf2.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + + let waku2 = Waku.new(conf2).valueOr: + raiseAssert error + (waitFor startWaku(addr waku2)).isOkOr: + raiseAssert error + + # leave some time for discv5 to act + await sleepAsync(chronos.seconds(10)) + + var r = waku0.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku1.node.peerManager.selectPeer(WakuRelayCodec) + assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec" + + r = waku1.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isNone(), "should not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku2.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku2.node.peerManager.selectPeer(RendezVousCodec) + assert r.isSome(), "could not retrieve peer mounting RendezVousCodec" + asyncTest "Discv5 bootstrap nodes should be added to the peer store": var conf = defaultTestWakuNodeConf() conf.discv5BootstrapNodes = @[validEnr] - let waku = Waku.init(conf).valueOr: + let waku = Waku.new(conf).valueOr: raiseAssert error discard setupDiscoveryV5( @@ -421,7 +481,7 @@ suite "Waku Discovery v5": ) check: - waku.node.peerManager.wakuPeerStore.peers().anyIt( + waku.node.peerManager.switch.peerStore.peers().anyIt( 
it.enr.isSome() and it.enr.get().toUri() == validEnr ) @@ -432,7 +492,7 @@ suite "Waku Discovery v5": conf.discv5BootstrapNodes = @[invalidEnr] - let waku = Waku.init(conf).valueOr: + let waku = Waku.new(conf).valueOr: raiseAssert error discard setupDiscoveryV5( @@ -441,6 +501,6 @@ suite "Waku Discovery v5": ) check: - not waku.node.peerManager.wakuPeerStore.peers().anyIt( + not waku.node.peerManager.switch.peerStore.peers().anyIt( it.enr.isSome() and it.enr.get().toUri() == invalidEnr ) diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index ba04b6b00..39baeea3e 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -79,7 +79,6 @@ type ConnectionChangeHandler* = proc( type PeerManager* = ref object of RootObj switch*: Switch - wakuPeerStore*: WakuPeerStore wakuMetadata*: WakuMetadata initialBackoffInSec*: int backoffFactor*: int @@ -138,38 +137,13 @@ proc addPeer*( trace "skipping to manage our unmanageable self" return - if pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] == remotePeerInfo.addrs and - pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] == remotePeerInfo.publicKey and - pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw.len > 0: - let incomingEnr = remotePeerInfo.enr.valueOr: - trace "peer already managed and incoming ENR is empty", - remote_peer_id = $remotePeerInfo.peerId - return - - if pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].raw == incomingEnr.raw or - pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId].seqNum > incomingEnr.seqNum: - trace "peer already managed and ENR info is already saved", - remote_peer_id = $remotePeerInfo.peerId - return + pm.switch.peerStore.addPeer(remotePeerInfo, origin) trace "Adding peer to manager", - peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs + peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs, origin waku_total_unique_peers.inc() - pm.wakuPeerStore[AddressBook][remotePeerInfo.peerId] = remotePeerInfo.addrs - pm.wakuPeerStore[KeyBook][remotePeerInfo.peerId] = remotePeerInfo.publicKey - pm.wakuPeerStore[SourceBook][remotePeerInfo.peerId] = origin - pm.wakuPeerStore[ProtoVersionBook][remotePeerInfo.peerId] = - remotePeerInfo.protoVersion - pm.wakuPeerStore[AgentBook][remotePeerInfo.peerId] = remotePeerInfo.agent - - if remotePeerInfo.protocols.len > 0: - pm.wakuPeerStore[ProtoBook][remotePeerInfo.peerId] = remotePeerInfo.protocols - - if remotePeerInfo.enr.isSome(): - pm.wakuPeerStore[ENRBook][remotePeerInfo.peerId] = remotePeerInfo.enr.get() - # Add peer to storage. Entry will subsequently be updated with connectedness information if not pm.storage.isNil: # Reading from the db (pm.storage) is only done on startup, hence you need to connect to all saved peers. 
@@ -180,6 +154,9 @@ proc addPeer*( pm.storage.insertOrReplace(remotePeerInfo) +proc getPeer(pm: PeerManager, peerId: PeerId): RemotePeerInfo = + return pm.switch.peerStore.getPeer(peerId) + proc loadFromStorage(pm: PeerManager) {.gcsafe.} = ## Load peers from storage, if available @@ -202,19 +179,20 @@ proc loadFromStorage(pm: PeerManager) {.gcsafe.} = version = remotePeerInfo.protoVersion # nim-libp2p books - pm.wakuPeerStore[AddressBook][peerId] = remotePeerInfo.addrs - pm.wakuPeerStore[ProtoBook][peerId] = remotePeerInfo.protocols - pm.wakuPeerStore[KeyBook][peerId] = remotePeerInfo.publicKey - pm.wakuPeerStore[AgentBook][peerId] = remotePeerInfo.agent - pm.wakuPeerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion + pm.switch.peerStore[AddressBook][peerId] = remotePeerInfo.addrs + pm.switch.peerStore[ProtoBook][peerId] = remotePeerInfo.protocols + pm.switch.peerStore[KeyBook][peerId] = remotePeerInfo.publicKey + pm.switch.peerStore[AgentBook][peerId] = remotePeerInfo.agent + pm.switch.peerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion # custom books - pm.wakuPeerStore[ConnectionBook][peerId] = NotConnected # Reset connectedness state - pm.wakuPeerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime - pm.wakuPeerStore[SourceBook][peerId] = remotePeerInfo.origin + pm.switch.peerStore[ConnectionBook][peerId] = NotConnected + # Reset connectedness state + pm.switch.peerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime + pm.switch.peerStore[SourceBook][peerId] = remotePeerInfo.origin if remotePeerInfo.enr.isSome(): - pm.wakuPeerStore[ENRBook][peerId] = remotePeerInfo.enr.get() + pm.switch.peerStore[ENRBook][peerId] = remotePeerInfo.enr.get() amount.inc() @@ -228,10 +206,11 @@ proc loadFromStorage(pm: PeerManager) {.gcsafe.} = proc selectPeer*( pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic) ): Option[RemotePeerInfo] = - trace "Selecting peer from peerstore", protocol = proto - # Selects the best peer for a given protocol - var peers = pm.wakuPeerStore.getPeersByProtocol(proto) + + var peers = pm.switch.peerStore.getPeersByProtocol(proto) + trace "Selecting peer from peerstore", + protocol = proto, peers, address = cast[uint](pm.switch.peerStore) if shard.isSome(): peers.keepItIf((it.enr.isSome() and it.enr.get().containsShard(shard.get()))) @@ -302,14 +281,16 @@ proc connectPeer*( ): Future[bool] {.async.} = let peerId = peer.peerId + var peerStore = pm.switch.peerStore + # Do not attempt to dial self if peerId == pm.switch.peerInfo.peerId: return false - if not pm.wakuPeerStore.peerExists(peerId): + if not peerStore.peerExists(peerId): pm.addPeer(peer) - let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId] + let failedAttempts = peerStore[NumberFailedConnBook][peerId] trace "Connecting to peer", wireAddr = peer.addrs, peerId = peerId, failedAttempts = failedAttempts @@ -333,20 +314,19 @@ proc connectPeer*( waku_peers_dials.inc(labelValues = ["successful"]) waku_node_conns_initiated.inc(labelValues = [source]) - pm.wakuPeerStore[NumberFailedConnBook][peerId] = 0 + peerStore[NumberFailedConnBook][peerId] = 0 return true # Dial failed - pm.wakuPeerStore[NumberFailedConnBook][peerId] = - pm.wakuPeerStore[NumberFailedConnBook][peerId] + 1 - pm.wakuPeerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second) - pm.wakuPeerStore[ConnectionBook][peerId] = CannotConnect + peerStore[NumberFailedConnBook][peerId] = peerStore[NumberFailedConnBook][peerId] + 1 + 
peerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second) + peerStore[ConnectionBook][peerId] = CannotConnect trace "Connecting peer failed", peerId = peerId, reason = reasonFailed, - failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId] + failedAttempts = peerStore[NumberFailedConnBook][peerId] waku_peers_dials.inc(labelValues = [reasonFailed]) return false @@ -453,7 +433,7 @@ proc dialPeer*( # First add dialed peer info to peer store, if it does not exist yet.. # TODO: nim libp2p peerstore already adds them - if not pm.wakuPeerStore.hasPeer(remotePeerInfo.peerId, proto): + if not pm.switch.peerStore.hasPeer(remotePeerInfo.peerId, proto): trace "Adding newly dialed peer to manager", peerId = $remotePeerInfo.peerId, address = $remotePeerInfo.addrs[0], proto = proto pm.addPeer(remotePeerInfo) @@ -479,7 +459,8 @@ proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool = # Returns if we can try to connect to this peer, based on past failed attempts # It uses an exponential backoff. Each connection attempt makes us # wait more before trying again. - let failedAttempts = pm.wakuPeerStore[NumberFailedConnBook][peerId] + let peerStore = pm.switch.peerStore + let failedAttempts = peerStore[NumberFailedConnBook][peerId] # if it never errored, we can try to connect if failedAttempts == 0: @@ -492,7 +473,7 @@ proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool = # If it errored we wait an exponential backoff from last connection # the more failed attempts, the greater the backoff since last attempt let now = Moment.init(getTime().toUnix, Second) - let lastFailed = pm.wakuPeerStore[LastFailedConnBook][peerId] + let lastFailed = peerStore[LastFailedConnBook][peerId] let backoff = calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts) @@ -564,7 +545,7 @@ proc connectToRelayPeers*(pm: PeerManager) {.async.} = if outRelayPeers.len >= pm.outRelayPeersTarget: return - let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers() + let notConnectedPeers = pm.switch.peerStore.getDisconnectedPeers() var outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) @@ -593,7 +574,7 @@ proc reconnectPeers*( debug "Reconnecting peers", proto = proto # Proto is not persisted, we need to iterate over all peers. 
- for peerInfo in pm.wakuPeerStore.peers(protocolMatcher(proto)): + for peerInfo in pm.switch.peerStore.peers(protocolMatcher(proto)): # Check that the peer can be connected if peerInfo.connectedness == CannotConnect: error "Not reconnecting to unreachable or non-existing peer", @@ -666,7 +647,7 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = break guardClauses if ( - pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec) and + pm.switch.peerStore.hasPeer(peerId, WakuRelayCodec) and not metadata.shards.anyIt(pm.wakuMetadata.shards.contains(it)) ): let myShardsString = "[ " & toSeq(pm.wakuMetadata.shards).join(", ") & " ]" @@ -680,13 +661,14 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = info "disconnecting from peer", peerId = peerId, reason = reason asyncSpawn(pm.switch.disconnect(peerId)) - pm.wakuPeerStore.delete(peerId) + pm.switch.peerStore.delete(peerId) # called when a peer i) first connects to us ii) disconnects all connections from us proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = if not pm.wakuMetadata.isNil() and event.kind == PeerEventKind.Joined: await pm.onPeerMetadata(peerId) + var peerStore = pm.switch.peerStore var direction: PeerDirection var connectedness: Connectedness @@ -698,7 +680,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = ## Check max allowed in-relay peers let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0] if inRelayPeers.len > pm.inRelayPeersTarget and - pm.wakuPeerStore.hasPeer(peerId, WakuRelayCodec): + peerStore.hasPeer(peerId, WakuRelayCodec): debug "disconnecting relay peer because reached max num in-relay peers", peerId = peerId, inRelayPeers = inRelayPeers.len, @@ -717,7 +699,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]: debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip asyncSpawn(pm.switch.disconnect(peerId)) - pm.wakuPeerStore.delete(peerId) + peerStore.delete(peerId) if not pm.onConnectionChange.isNil(): # we don't want to await for the callback to finish asyncSpawn pm.onConnectionChange(peerId, Joined) @@ -738,11 +720,11 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = of Identified: debug "event identified", peerId = peerId - pm.wakuPeerStore[ConnectionBook][peerId] = connectedness - pm.wakuPeerStore[DirectionBook][peerId] = direction + peerStore[ConnectionBook][peerId] = connectedness + peerStore[DirectionBook][peerId] = direction if not pm.storage.isNil: - var remotePeerInfo = pm.wakuPeerStore.getPeer(peerId) + var remotePeerInfo = peerStore.getPeer(peerId) if event.kind == PeerEventKind.Left: remotePeerInfo.disconnectTime = getTime().toUnix @@ -755,12 +737,12 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = proc logAndMetrics(pm: PeerManager) {.async.} = heartbeat "Scheduling log and metrics run", LogAndMetricsInterval: + var peerStore = pm.switch.peerStore # log metrics let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) let maxConnections = pm.switch.connManager.inSema.size - let notConnectedPeers = pm.wakuPeerStore.getDisconnectedPeers().mapIt( - RemotePeerInfo.init(it.peerId, it.addrs) - ) + let notConnectedPeers = + peerStore.getDisconnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) let totalConnections = 
pm.switch.connManager.getConnections().len @@ -772,7 +754,7 @@ proc logAndMetrics(pm: PeerManager) {.async.} = outsideBackoffPeers = outsideBackoffPeers.len # update prometheus metrics - for proto in pm.wakuPeerStore.getWakuProtos(): + for proto in peerStore.getWakuProtos(): let (protoConnsIn, protoConnsOut) = pm.connectedPeers(proto) let (protoStreamsIn, protoStreamsOut) = pm.getNumStreams(proto) waku_connected_peers.set( @@ -806,14 +788,16 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} = let inTarget = pm.inRelayPeersTarget div pm.wakuMetadata.shards.len let outTarget = pm.outRelayPeersTarget div pm.wakuMetadata.shards.len + var peerStore = pm.switch.peerStore + for shard in pm.wakuMetadata.shards.items: # Filter out peer not on this shard let connectedInPeers = inPeers.filterIt( - pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard)) + peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard)) ) let connectedOutPeers = outPeers.filterIt( - pm.wakuPeerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard)) + peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), uint16(shard)) ) # Calculate the difference between current values and targets @@ -828,17 +812,17 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} = # Get all peers for this shard var connectablePeers = - pm.wakuPeerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard)) + peerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard)) let shardCount = connectablePeers.len connectablePeers.keepItIf( - not pm.wakuPeerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId) + not peerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId) ) let connectableCount = connectablePeers.len - connectablePeers.keepItIf(pm.wakuPeerStore.hasCapability(it.peerId, Relay)) + connectablePeers.keepItIf(peerStore.hasCapability(it.peerId, Relay)) let relayCount = connectablePeers.len @@ -862,7 +846,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} = if peersToConnect.len == 0: return - let uniquePeers = toSeq(peersToConnect).mapIt(pm.wakuPeerStore.getPeer(it)) + let uniquePeers = toSeq(peersToConnect).mapIt(peerStore.getPeer(it)) # Connect to all nodes for i in countup(0, uniquePeers.len, MaxParallelDials): @@ -871,8 +855,9 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} = await pm.connectToNodes(uniquePeers[i ..< stop]) proc prunePeerStore*(pm: PeerManager) = - let numPeers = pm.wakuPeerStore[AddressBook].book.len - let capacity = pm.wakuPeerStore.getCapacity() + let peerStore = pm.switch.peerStore + let numPeers = peerStore[AddressBook].book.len + let capacity = peerStore.getCapacity() if numPeers <= capacity: return @@ -881,7 +866,7 @@ proc prunePeerStore*(pm: PeerManager) = var peersToPrune: HashSet[PeerId] # prune failed connections - for peerId, count in pm.wakuPeerStore[NumberFailedConnBook].book.pairs: + for peerId, count in peerStore[NumberFailedConnBook].book.pairs: if count < pm.maxFailedAttempts: continue @@ -890,7 +875,7 @@ proc prunePeerStore*(pm: PeerManager) = peersToPrune.incl(peerId) - var notConnected = pm.wakuPeerStore.getDisconnectedPeers().mapIt(it.peerId) + var notConnected = peerStore.getDisconnectedPeers().mapIt(it.peerId) # Always pick random non-connected peers shuffle(notConnected) @@ -899,11 +884,11 @@ proc prunePeerStore*(pm: PeerManager) = var peersByShard = initTable[uint16, seq[PeerId]]() for peer in notConnected: - if not pm.wakuPeerStore[ENRBook].contains(peer): + if not 
peerStore[ENRBook].contains(peer): shardlessPeers.add(peer) continue - let record = pm.wakuPeerStore[ENRBook][peer] + let record = peerStore[ENRBook][peer] let rec = record.toTyped().valueOr: shardlessPeers.add(peer) @@ -937,9 +922,9 @@ proc prunePeerStore*(pm: PeerManager) = peersToPrune.incl(peer) for peer in peersToPrune: - pm.wakuPeerStore.delete(peer) + peerStore.delete(peer) - let afterNumPeers = pm.wakuPeerStore[AddressBook].book.len + let afterNumPeers = peerStore[AddressBook].book.len trace "Finished pruning peer store", beforeNumPeers = numPeers, @@ -1060,7 +1045,6 @@ proc new*( let pm = PeerManager( switch: switch, wakuMetadata: wakuMetadata, - wakuPeerStore: createWakuPeerStore(switch.peerStore), storage: storage, initialBackoffInSec: initialBackoffInSec, backoffFactor: backoffFactor, @@ -1076,14 +1060,16 @@ proc new*( proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} = onPeerEvent(pm, peerId, event) + var peerStore = pm.switch.peerStore + proc peerStoreChanged(peerId: PeerId) {.gcsafe.} = - waku_peer_store_size.set(toSeq(pm.wakuPeerStore[AddressBook].book.keys).len.int64) + waku_peer_store_size.set(toSeq(peerStore[AddressBook].book.keys).len.int64) pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Joined) pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Left) # called every time the peerstore is updated - pm.wakuPeerStore[AddressBook].addHandler(peerStoreChanged) + peerStore[AddressBook].addHandler(peerStoreChanged) pm.serviceSlots = initTable[string, RemotePeerInfo]() pm.ipTable = initTable[string, seq[PeerId]]() diff --git a/waku/node/peer_manager/waku_peer_store.nim b/waku/node/peer_manager/waku_peer_store.nim index 027a1823f..777e4f2be 100644 --- a/waku/node/peer_manager/waku_peer_store.nim +++ b/waku/node/peer_manager/waku_peer_store.nim @@ -3,6 +3,7 @@ import std/[tables, sequtils, sets, options, strutils], chronos, + chronicles, eth/p2p/discoveryv5/enr, libp2p/builders, libp2p/peerstore @@ -11,14 +12,12 @@ import ../../waku_core, ../../waku_enr/sharding, ../../waku_enr/capabilities, - ../../common/utils/sequence + ../../common/utils/sequence, + ../../waku_core/peers export peerstore, builders type - WakuPeerStore* = ref object - peerStore: PeerStore - # Keeps track of the Connectedness state of a peer ConnectionBook* = ref object of PeerBook[Connectedness] @@ -40,137 +39,152 @@ type # Keeps track of the ENR (Ethereum Node Record) of a peer ENRBook* = ref object of PeerBook[enr.Record] -# Constructor -proc new*(T: type WakuPeerStore, identify: Identify, capacity = 1000): WakuPeerStore = - let peerStore = PeerStore.new(identify, capacity) - WakuPeerStore(peerStore: peerStore) - -proc createWakuPeerStore*(peerStore: PeerStore): WakuPeerStore = - WakuPeerStore(peerStore: peerStore) - -# Core functionality -proc `[]`*(wps: WakuPeerStore, T: typedesc): T = - wps.peerStore[T] - -proc getPeer*(wps: WakuPeerStore, peerId: PeerId): RemotePeerInfo = +proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo = RemotePeerInfo( peerId: peerId, - addrs: wps[AddressBook][peerId], + addrs: peerStore[AddressBook][peerId], enr: - if wps[ENRBook][peerId] != default(enr.Record): - some(wps[ENRBook][peerId]) + if peerStore[ENRBook][peerId] != default(enr.Record): + some(peerStore[ENRBook][peerId]) else: none(enr.Record), - protocols: wps[ProtoBook][peerId], - agent: wps[AgentBook][peerId], - protoVersion: wps[ProtoVersionBook][peerId], - publicKey: wps[KeyBook][peerId], - connectedness: wps[ConnectionBook][peerId], - disconnectTime: 
wps[DisconnectBook][peerId], - origin: wps[SourceBook][peerId], - direction: wps[DirectionBook][peerId], - lastFailedConn: wps[LastFailedConnBook][peerId], - numberFailedConn: wps[NumberFailedConnBook][peerId], + protocols: peerStore[ProtoBook][peerId], + agent: peerStore[AgentBook][peerId], + protoVersion: peerStore[ProtoVersionBook][peerId], + publicKey: peerStore[KeyBook][peerId], + connectedness: peerStore[ConnectionBook][peerId], + disconnectTime: peerStore[DisconnectBook][peerId], + origin: peerStore[SourceBook][peerId], + direction: peerStore[DirectionBook][peerId], + lastFailedConn: peerStore[LastFailedConnBook][peerId], + numberFailedConn: peerStore[NumberFailedConnBook][peerId], ) -proc addPeer*(wps: WakuPeerStore, peer: RemotePeerInfo) = - ## Only used in tests - wps[AddressBook][peer.peerId] = peer.addrs - wps[ProtoBook][peer.peerId] = peer.protocols - wps[AgentBook][peer.peerId] = peer.agent - wps[ProtoVersionBook][peer.peerId] = peer.protoVersion - wps[KeyBook][peer.peerId] = peer.publicKey - wps[ConnectionBook][peer.peerId] = peer.connectedness - wps[DisconnectBook][peer.peerId] = peer.disconnectTime - wps[SourceBook][peer.peerId] = peer.origin - wps[DirectionBook][peer.peerId] = peer.direction - wps[LastFailedConnBook][peer.peerId] = peer.lastFailedConn - wps[NumberFailedConnBook][peer.peerId] = peer.numberFailedConn - if peer.enr.isSome(): - wps[ENRBook][peer.peerId] = peer.enr.get() - -proc delete*(wps: WakuPeerStore, peerId: PeerId) = +proc delete*(peerStore: PeerStore, peerId: PeerId) = # Delete all the information of a given peer. - wps.peerStore.del(peerId) + peerStore.del(peerId) -# TODO: Rename peers() to getPeersByProtocol() -proc peers*(wps: WakuPeerStore): seq[RemotePeerInfo] = +proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] = let allKeys = concat( - toSeq(wps[AddressBook].book.keys()), - toSeq(wps[ProtoBook].book.keys()), - toSeq(wps[KeyBook].book.keys()), + toSeq(peerStore[AddressBook].book.keys()), + toSeq(peerStore[ProtoBook].book.keys()), + toSeq(peerStore[KeyBook].book.keys()), ) .toHashSet() - return allKeys.mapIt(wps.getPeer(it)) + return allKeys.mapIt(peerStore.getPeer(it)) -proc peers*(wps: WakuPeerStore, proto: string): seq[RemotePeerInfo] = - wps.peers().filterIt(it.protocols.contains(proto)) +proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin) = + ## Notice that the origin parameter is used to manually override the given peer origin. + ## At the time of writing, this is used in waku_discv5 or waku_node (peer exchange.) 
+ if peerStore[AddressBook][peer.peerId] == peer.addrs and + peerStore[KeyBook][peer.peerId] == peer.publicKey and + peerStore[ENRBook][peer.peerId].raw.len > 0: + let incomingEnr = peer.enr.valueOr: + trace "peer already managed and incoming ENR is empty", + remote_peer_id = $peer.peerId + return -proc peers*(wps: WakuPeerStore, protocolMatcher: Matcher): seq[RemotePeerInfo] = - wps.peers().filterIt(it.protocols.anyIt(protocolMatcher(it))) + if peerStore[ENRBook][peer.peerId].raw == incomingEnr.raw or + peerStore[ENRBook][peer.peerId].seqNum > incomingEnr.seqNum: + trace "peer already managed and ENR info is already saved", + remote_peer_id = $peer.peerId + return -proc connectedness*(wps: WakuPeerStore, peerId: PeerId): Connectedness = - wps[ConnectionBook].book.getOrDefault(peerId, NotConnected) + peerStore[AddressBook][peer.peerId] = peer.addrs -proc hasShard*(wps: WakuPeerStore, peerId: PeerID, cluster, shard: uint16): bool = - wps[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard) + var protos = peerStore[ProtoBook][peer.peerId] + for new_proto in peer.protocols: + ## append new discovered protocols to the current known protocols set + if not protos.contains(new_proto): + protos.add($new_proto) + peerStore[ProtoBook][peer.peerId] = protos -proc hasCapability*(wps: WakuPeerStore, peerId: PeerID, cap: Capabilities): bool = - wps[ENRBook].book.getOrDefault(peerId).supportsCapability(cap) + peerStore[AgentBook][peer.peerId] = peer.agent + peerStore[ProtoVersionBook][peer.peerId] = peer.protoVersion + peerStore[KeyBook][peer.peerId] = peer.publicKey + peerStore[ConnectionBook][peer.peerId] = peer.connectedness + peerStore[DisconnectBook][peer.peerId] = peer.disconnectTime + peerStore[SourceBook][peer.peerId] = + if origin != UnknownOrigin: origin else: peer.origin + peerStore[DirectionBook][peer.peerId] = peer.direction + peerStore[LastFailedConnBook][peer.peerId] = peer.lastFailedConn + peerStore[NumberFailedConnBook][peer.peerId] = peer.numberFailedConn + if peer.enr.isSome(): + peerStore[ENRBook][peer.peerId] = peer.enr.get() -proc peerExists*(wps: WakuPeerStore, peerId: PeerId): bool = - wps[AddressBook].contains(peerId) +proc peers*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] = + peerStore.peers().filterIt(it.protocols.contains(proto)) -proc isConnected*(wps: WakuPeerStore, peerId: PeerID): bool = +proc peers*(peerStore: PeerStore, protocolMatcher: Matcher): seq[RemotePeerInfo] = + peerStore.peers().filterIt(it.protocols.anyIt(protocolMatcher(it))) + +proc connectedness*(peerStore: PeerStore, peerId: PeerId): Connectedness = + peerStore[ConnectionBook].book.getOrDefault(peerId, NotConnected) + +proc hasShard*(peerStore: PeerStore, peerId: PeerID, cluster, shard: uint16): bool = + peerStore[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard) + +proc hasCapability*(peerStore: PeerStore, peerId: PeerID, cap: Capabilities): bool = + peerStore[ENRBook].book.getOrDefault(peerId).supportsCapability(cap) + +proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool = + peerStore[AddressBook].contains(peerId) + +proc isConnected*(peerStore: PeerStore, peerId: PeerID): bool = # Returns `true` if the peer is connected - wps.connectedness(peerId) == Connected + peerStore.connectedness(peerId) == Connected -proc hasPeer*(wps: WakuPeerStore, peerId: PeerID, proto: string): bool = +proc hasPeer*(peerStore: PeerStore, peerId: PeerID, proto: string): bool = # Returns `true` if peer is included in manager for the specified protocol - # TODO: What if peer does 
not exist in the wps? - wps.getPeer(peerId).protocols.contains(proto) + # TODO: What if peer does not exist in the peerStore? + peerStore.getPeer(peerId).protocols.contains(proto) -proc hasPeers*(wps: WakuPeerStore, proto: string): bool = +proc hasPeers*(peerStore: PeerStore, proto: string): bool = # Returns `true` if the peerstore has any peer for the specified protocol - toSeq(wps[ProtoBook].book.values()).anyIt(it.anyIt(it == proto)) + toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(it == proto)) -proc hasPeers*(wps: WakuPeerStore, protocolMatcher: Matcher): bool = +proc hasPeers*(peerStore: PeerStore, protocolMatcher: Matcher): bool = # Returns `true` if the peerstore has any peer matching the protocolMatcher - toSeq(wps[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it))) + toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it))) -proc getCapacity*(wps: WakuPeerStore): int = - wps.peerStore.capacity +proc getCapacity*(peerStore: PeerStore): int = + peerStore.capacity -proc setCapacity*(wps: WakuPeerStore, capacity: int) = - wps.peerStore.capacity = capacity +proc setCapacity*(peerStore: PeerStore, capacity: int) = + peerStore.capacity = capacity -proc getWakuProtos*(wps: WakuPeerStore): seq[string] = - toSeq(wps[ProtoBook].book.values()).flatten().deduplicate().filterIt( +proc getWakuProtos*(peerStore: PeerStore): seq[string] = + toSeq(peerStore[ProtoBook].book.values()).flatten().deduplicate().filterIt( it.startsWith("/vac/waku") ) proc getPeersByDirection*( - wps: WakuPeerStore, direction: PeerDirection + peerStore: PeerStore, direction: PeerDirection ): seq[RemotePeerInfo] = - return wps.peers.filterIt(it.direction == direction) + return peerStore.peers.filterIt(it.direction == direction) -proc getDisconnectedPeers*(wps: WakuPeerStore): seq[RemotePeerInfo] = - return wps.peers.filterIt(it.connectedness != Connected) +proc getDisconnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness != Connected) -proc getConnectedPeers*(wps: WakuPeerStore): seq[RemotePeerInfo] = - return wps.peers.filterIt(it.connectedness == Connected) +proc getConnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness == Connected) -proc getPeersByProtocol*(wps: WakuPeerStore, proto: string): seq[RemotePeerInfo] = - return wps.peers.filterIt(it.protocols.contains(proto)) +proc getPeersByProtocol*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.protocols.contains(proto)) -proc getReachablePeers*(wps: WakuPeerStore): seq[RemotePeerInfo] = +proc getReachablePeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness != CannotConnect) + +proc getPeersByShard*( + peerStore: PeerStore, cluster, shard: uint16 +): seq[RemotePeerInfo] = + return peerStore.peers.filterIt( + it.enr.isSome() and it.enr.get().containsShard(cluster, shard) + ) + +proc getPeersByCapability*( + peerStore: PeerStore, cap: Capabilities +): seq[RemotePeerInfo] = return - wps.peers.filterIt(it.connectedness == CanConnect or it.connectedness == Connected) - -proc getPeersByShard*(wps: WakuPeerStore, cluster, shard: uint16): seq[RemotePeerInfo] = - return - wps.peers.filterIt(it.enr.isSome() and it.enr.get().containsShard(cluster, shard)) - -proc getPeersByCapability*(wps: WakuPeerStore, cap: Capabilities): seq[RemotePeerInfo] = - return wps.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap)) 
+ peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap)) diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index cb712befd..ae08b503a 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -417,7 +417,7 @@ proc startRelay*(node: WakuNode) {.async.} = ## Setup relay protocol # Resume previous relay connections - if node.peerManager.wakuPeerStore.hasPeers(protocolMatcher(WakuRelayCodec)): + if node.peerManager.switch.peerStore.hasPeers(protocolMatcher(WakuRelayCodec)): info "Found previous WakuRelay peers. Reconnecting." # Reconnect to previous relay peers. This will respect a backoff period, if necessary @@ -1260,7 +1260,7 @@ proc fetchPeerExchangePeers*( ) ) - info "Retrieving peer info via peer exchange protocol" + info "Retrieving peer info via peer exchange protocol", amount let pxPeersRes = await node.wakuPeerExchange.request(amount) if pxPeersRes.isOk: var validPeers = 0 diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index c140c46d6..f2eb4a8ba 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -41,7 +41,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: var peers: WakuPeers = @[] - let relayPeers = node.peerManager.wakuPeerStore.peers(WakuRelayCodec).mapIt( + let relayPeers = node.peerManager.switch.peerStore.peers(WakuRelayCodec).mapIt( ( multiaddr: constructMultiaddrStr(it), protocol: WakuRelayCodec, @@ -51,7 +51,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, relayPeers) - let filterV2Peers = node.peerManager.wakuPeerStore + let filterV2Peers = node.peerManager.switch.peerStore .peers(WakuFilterSubscribeCodec) .mapIt( ( @@ -63,7 +63,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, filterV2Peers) - let storePeers = node.peerManager.wakuPeerStore.peers(WakuStoreCodec).mapIt( + let storePeers = node.peerManager.switch.peerStore.peers(WakuStoreCodec).mapIt( ( multiaddr: constructMultiaddrStr(it), protocol: WakuStoreCodec, @@ -73,7 +73,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, storePeers) - let legacyStorePeers = node.peerManager.wakuPeerStore + let legacyStorePeers = node.peerManager.switch.peerStore .peers(WakuLegacyStoreCodec) .mapIt( ( @@ -85,7 +85,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, legacyStorePeers) - let legacyLightpushPeers = node.peerManager.wakuPeerStore + let legacyLightpushPeers = node.peerManager.switch.peerStore .peers(WakuLegacyLightPushCodec) .mapIt( ( @@ -97,7 +97,9 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, legacyLightpushPeers) - let lightpushPeers = node.peerManager.wakuPeerStore.peers(WakuLightPushCodec).mapIt( + let lightpushPeers = node.peerManager.switch.peerStore + .peers(WakuLightPushCodec) + .mapIt( ( multiaddr: constructMultiaddrStr(it), protocol: WakuLightPushCodec, @@ -107,7 +109,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, lightpushPeers) - let pxPeers = node.peerManager.wakuPeerStore.peers(WakuPeerExchangeCodec).mapIt( + let pxPeers = node.peerManager.switch.peerStore.peers(WakuPeerExchangeCodec).mapIt( ( multiaddr: 
constructMultiaddrStr(it), protocol: WakuPeerExchangeCodec, diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim index d8b79ab67..c3a4683f7 100644 --- a/waku/waku_filter_v2/protocol.nim +++ b/waku/waku_filter_v2/protocol.nim @@ -225,7 +225,7 @@ proc maintainSubscriptions*(wf: WakuFilter) {.async.} = ## Remove subscriptions for peers that have been removed from peer store var peersToRemove: seq[PeerId] for peerId in wf.subscriptions.peersSubscribed.keys: - if not wf.peerManager.wakuPeerStore.hasPeer(peerId, WakuFilterPushCodec): + if not wf.peerManager.switch.peerStore.hasPeer(peerId, WakuFilterPushCodec): debug "peer has been removed from peer store, we will remove subscription", peerId = peerId peersToRemove.add(peerId) diff --git a/waku/waku_peer_exchange/protocol.nim b/waku/waku_peer_exchange/protocol.nim index 7c9005215..2732cb1c1 100644 --- a/waku/waku_peer_exchange/protocol.nim +++ b/waku/waku_peer_exchange/protocol.nim @@ -218,7 +218,7 @@ proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool = proc populateEnrCache(wpx: WakuPeerExchange) = # share only peers that i) are reachable ii) come from discv5 iii) share cluster - let withEnr = wpx.peerManager.wakuPeerStore.getReachablePeers().filterIt( + let withEnr = wpx.peerManager.switch.peerStore.getReachablePeers().filterIt( poolFilter(wpx.cluster, it) ) From 483103de37844242fc5edc436b6d99ed7f6faa13 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Wed, 9 Apr 2025 21:36:06 +0200 Subject: [PATCH 12/48] Update the upload-artifact from v3 to v4 in pre-release.yml (#3363) --- .github/workflows/pre-release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index cf6711260..b138a2248 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -76,14 +76,14 @@ jobs: tar -cvzf ${{steps.vars.outputs.nwakutools}} ./build/wakucanary ./build/networkmonitor - name: upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: wakunode2 path: ${{steps.vars.outputs.nwaku}} retention-days: 2 - name: upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: wakutools path: ${{steps.vars.outputs.nwakutools}} From 3098b117d31ad3f1a79349bed5f12f2eab2b8498 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 10 Apr 2025 00:28:25 +0200 Subject: [PATCH 13/48] chore: skip two flaky tests (#3364) --- tests/waku_discv5/test_waku_discv5.nim | 6 ++++-- tests/waku_rln_relay/test_wakunode_rln_relay.nim | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index 3d66136e8..edde80ab3 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -14,7 +14,7 @@ import import waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/capabilities], - ../testlib/[wakucore, testasync, assertions, futures, wakunode], + ../testlib/[wakucore, testasync, assertions, futures, wakunode, testutils], ../waku_enr/utils, ./utils as discv5_utils @@ -300,7 +300,9 @@ suite "Waku Discovery v5": # Cleanup await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop()) - asyncTest "find random peers with instance predicate": + xasyncTest "find random peers with instance predicate": + ## This is skipped 
because is flaky and made CI randomly fail but is useful to run manually + ## Setup # Records let diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index 186343727..b07cca408 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -486,7 +486,9 @@ procSuite "WakuNode - RLN relay": await node2.stop() await node3.stop() - asyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": + xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": + ## This is skipped because is flaky and made CI randomly fail but is useful to run manually + # Given two nodes let contentTopic = ContentTopic("/waku/2/default-content/proto") From dffad311a23c40f1f941deb413147e4708d90358 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Thu, 10 Apr 2025 14:34:54 +0300 Subject: [PATCH 14/48] fix: avoid performing nil check for userData (#3365) --- library/libwaku.nim | 4 ---- 1 file changed, 4 deletions(-) diff --git a/library/libwaku.nim b/library/libwaku.nim index ebe730da8..23600aca4 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -52,10 +52,6 @@ template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untype error eventName & " - eventCallback is nil" return - if isNil(ctx[].eventUserData): - error eventName & " - eventUserData is nil" - return - foreignThreadGc: try: let event = body From 856224c62d099c008ca1d8c3b29efcc4db5fbbee Mon Sep 17 00:00:00 2001 From: Hanno Cornelius <68783915+jm-clius@users.noreply.github.com> Date: Thu, 10 Apr 2025 14:38:56 +0100 Subject: [PATCH 15/48] docs: update prerequisites (#3320) Add `rustc` and `cargo` as prerequisite to README (required for RLN compilation). --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 9d8b58110..9b6dba4a4 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,13 @@ The standard developer tools, including a C compiler, GNU Make, Bash, and Git. M > In some distributions (Fedora linux for example), you may need to install `which` utility separately. Nimbus build system is relying on it. +You'll also need an installation of Rust and its toolchain (specifically `rustc` and `cargo`). +The easiest way to install these, is using `rustup`: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + ### Wakunode ```bash From bbdf51ebf2eaea3e000962aafa23b3e55c90e964 Mon Sep 17 00:00:00 2001 From: markoburcul Date: Mon, 31 Mar 2025 14:08:20 +0200 Subject: [PATCH 16/48] nix: create nix flake and libwaku-android-arm64 target * android-ndk is added * in the derivation, system nim is default but one can change it to nimbus-build-system * special script for creating nimble links, necessary for the compilation to succeed. 
Referenced issue: * https://github.com/waku-org/nwaku/issues/3232 --- .gitmodules | 2 +- flake.lock | 49 ++++++++++++++ flake.nix | 63 +++++++++++++++++ nix/README.md | 35 ++++++++++ nix/atlas.nix | 12 ++++ nix/checksums.nix | 12 ++++ nix/create-nimble-link.sh | 82 ++++++++++++++++++++++ nix/csources.nix | 12 ++++ nix/default.nix | 112 +++++++++++++++++++++++++++++++ nix/nimble.nix | 12 ++++ nix/pkgs/android-sdk/compose.nix | 26 +++++++ nix/pkgs/android-sdk/default.nix | 14 ++++ nix/pkgs/android-sdk/pkgs.nix | 17 +++++ nix/pkgs/android-sdk/shell.nix | 19 ++++++ nix/sat.nix | 12 ++++ nix/shell.nix | 26 +++++++ nix/tools.nix | 15 +++++ scripts/generate_nimble_links.sh | 29 ++++++++ shell.nix | 22 ------ 19 files changed, 548 insertions(+), 23 deletions(-) create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 nix/README.md create mode 100644 nix/atlas.nix create mode 100644 nix/checksums.nix create mode 100755 nix/create-nimble-link.sh create mode 100644 nix/csources.nix create mode 100644 nix/default.nix create mode 100644 nix/nimble.nix create mode 100644 nix/pkgs/android-sdk/compose.nix create mode 100644 nix/pkgs/android-sdk/default.nix create mode 100644 nix/pkgs/android-sdk/pkgs.nix create mode 100644 nix/pkgs/android-sdk/shell.nix create mode 100644 nix/sat.nix create mode 100644 nix/shell.nix create mode 100644 nix/tools.nix create mode 100755 scripts/generate_nimble_links.sh delete mode 100644 shell.nix diff --git a/.gitmodules b/.gitmodules index bde56a76e..34a5b88e4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -168,7 +168,7 @@ path = vendor/db_connector url = https://github.com/nim-lang/db_connector.git ignore = untracked - branch = master + branch = devel [submodule "vendor/nph"] ignore = untracked branch = master diff --git a/flake.lock b/flake.lock new file mode 100644 index 000000000..359ae2579 --- /dev/null +++ b/flake.lock @@ -0,0 +1,49 @@ +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1740603184, + "narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "type": "github" + } + }, + "root": { + "inputs": { + "nixpkgs": "nixpkgs", + "zerokit": "zerokit" + } + }, + "zerokit": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1743756626, + "narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=", + "owner": "vacp2p", + "repo": "zerokit", + "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "type": "github" + }, + "original": { + "owner": "vacp2p", + "repo": "zerokit", + "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 000000000..419c1d6f7 --- /dev/null +++ b/flake.nix @@ -0,0 +1,63 @@ +{ + description = "NWaku build flake"; + + nixConfig = { + extra-substituters = [ "https://nix-cache.status.im/" ]; + extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ]; + }; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49"; + zerokit = { + url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = { self, nixpkgs, zerokit }: + let + stableSystems = [ + "x86_64-linux" 
"aarch64-linux" + "x86_64-darwin" "aarch64-darwin" + "x86_64-windows" "i686-linux" + "i686-windows" + ]; + + forAllSystems = f: nixpkgs.lib.genAttrs stableSystems (system: f system); + + pkgsFor = forAllSystems ( + system: import nixpkgs { + inherit system; + config = { + android_sdk.accept_license = true; + allowUnfree = true; + }; + overlays = [ + (final: prev: { + androidEnvCustom = prev.callPackage ./nix/pkgs/android-sdk { }; + androidPkgs = final.androidEnvCustom.pkgs; + androidShell = final.androidEnvCustom.shell; + }) + ]; + } + ); + + in rec { + packages = forAllSystems (system: let + pkgs = pkgsFor.${system}; + in rec { + libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix { + inherit stableSystems; + src = self; + targets = ["libwaku-android-arm64"]; + androidArch = "aarch64-linux-android"; + zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64; + }; + default = libwaku-android-arm64; + }); + + devShells = forAllSystems (system: { + default = pkgsFor.${system}.callPackage ./nix/shell.nix {}; + }); + }; +} \ No newline at end of file diff --git a/nix/README.md b/nix/README.md new file mode 100644 index 000000000..e928b7938 --- /dev/null +++ b/nix/README.md @@ -0,0 +1,35 @@ +# Usage + +## Shell + +A development shell can be started using: +```sh +nix develop +``` + +## Building + +To build a Codex you can use: +```sh +nix build '.?submodules=1#default' +``` +The `?submodules=1` part should eventually not be necessary. +For more details see: +https://github.com/NixOS/nix/issues/4423 + +It can be also done without even cloning the repo: +```sh +nix build 'git+https://github.com/waku-org/nwaku?submodules=1#' +``` + +## Running + +```sh +nix run 'git+https://github.com/waku-org/nwaku?submodules=1#'' +``` + +## Testing + +```sh +nix flake check ".?submodules=1#" +``` diff --git a/nix/atlas.nix b/nix/atlas.nix new file mode 100644 index 000000000..43336e07a --- /dev/null +++ b/nix/atlas.nix @@ -0,0 +1,12 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "atlas"; + rev = tools.findKeyValue "^ +AtlasStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-G1TZdgbRPSgxXZ3VsBP2+XFCLHXVb3an65MuQx67o/k="; +} \ No newline at end of file diff --git a/nix/checksums.nix b/nix/checksums.nix new file mode 100644 index 000000000..d79345d24 --- /dev/null +++ b/nix/checksums.nix @@ -0,0 +1,12 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "checksums"; + rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ="; +} diff --git a/nix/create-nimble-link.sh b/nix/create-nimble-link.sh new file mode 100755 index 000000000..8d2bc77b3 --- /dev/null +++ b/nix/create-nimble-link.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# This script generates `.nimble-link` files and the folder structure typically created by `make update` +# within this repository. It is an alternative to `vendor/nimbus-build-system/scripts/create_nimble_link.sh`, +# designed for execution inside a Nix derivation, where Git commands are not available. 
+ +output_file="submodule_paths.txt" + +# The EXCLUDED_NIM_PACKAGES variable (defined in the Makefile) contains a colon-separated list of +# submodule paths that should be ignored. We split it into an array for easier matching. +IFS=':' read -ra EXCLUDED_PATTERNS <<< "$EXCLUDED_NIM_PACKAGES" + +# Function to check if a given submodule path should be excluded +should_exclude() { + local path="$1" + for pattern in "${EXCLUDED_PATTERNS[@]}"; do + if [[ "$path" == *"$pattern"* ]]; then + return 0 # Match found, exclude this submodule + fi + done + return 1 # No match, include this submodule +} + +# Locate all `.gitmodules` files and extract submodule paths +find . -name .gitmodules | while read -r gitmodules_file; do + module_dir=$(dirname "$(realpath "$gitmodules_file")") + + while IFS= read -r line; do + # Extract the submodule path from lines matching `path = /some/path` + if [[ $line =~ path[[:space:]]*=[[:space:]]*(.*) ]]; then + submodule_path="${BASH_REMATCH[1]}" + abs_path="$module_dir/$submodule_path" + + # Skip if the submodule is in the excluded list + if should_exclude "$abs_path"; then + continue + fi + + # If the submodule contains a `src/` folder, use it as the path + if [[ -d "$abs_path/src" ]]; then + abs_path="$abs_path/src" + fi + + echo "$abs_path" >> "$output_file" + fi + done < "$gitmodules_file" +done + +echo "Submodule paths collected in $output_file" + +# Directory where Nimble packages will be linked +nimble_pkgs_dir="./vendor/.nimble/pkgs" + +mkdir -p "$nimble_pkgs_dir" + +# Process each submodule path collected earlier +while IFS= read -r submodule_path; do + # Determine the submodule name from its path + if [[ "$submodule_path" == */src ]]; then + submodule_name=$(basename "$(dirname "$submodule_path")") + else + submodule_name=$(basename "$submodule_path") + fi + + # Check if the submodule contains at least one `.nimble` file + base_dir="${submodule_path%/src}" + nimble_files_count=$(find "$base_dir" -maxdepth 1 -type f -name "*.nimble" | wc -l) + + if [ "$nimble_files_count" -gt 0 ]; then + submodule_dir="$nimble_pkgs_dir/${submodule_name}-#head" + mkdir -p "$submodule_dir" + + nimble_link_file="$submodule_dir/${submodule_name}.nimble-link" + # `.nimble-link` files require two identical lines for Nimble to recognize them properly + echo "$submodule_path" > "$nimble_link_file" + echo "$submodule_path" >> "$nimble_link_file" + fi +done < "$output_file" + +echo "Nimble packages prepared in $nimble_pkgs_dir" + +rm "$output_file" diff --git a/nix/csources.nix b/nix/csources.nix new file mode 100644 index 000000000..5aa90fd6f --- /dev/null +++ b/nix/csources.nix @@ -0,0 +1,12 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "csources_v2"; + rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs="; +} \ No newline at end of file diff --git a/nix/default.nix b/nix/default.nix new file mode 100644 index 000000000..5d598848d --- /dev/null +++ b/nix/default.nix @@ -0,0 +1,112 @@ +{ + config ? {}, + pkgs ? import { }, + src ? ../., + targets ? ["libwaku-android-arm64"], + verbosity ? 2, + useSystemNim ? true, + quickAndDirty ? true, + stableSystems ? 
[ + "x86_64-linux" "aarch64-linux" + ], + androidArch, + zerokitPkg, +}: + +assert pkgs.lib.assertMsg ((src.submodules or true) == true) + "Unable to build without submodules. Append '?submodules=1#' to the URI."; + +let + inherit (pkgs) stdenv lib writeScriptBin callPackage; + + revision = lib.substring 0 8 (src.rev or "dirty"); + +in stdenv.mkDerivation rec { + + pname = "nwaku"; + + version = "1.0.0-${revision}"; + + inherit src; + + buildInputs = with pkgs; [ + openssl + gmp + ]; + + # Dependencies that should only exist in the build environment. + nativeBuildInputs = let + # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'. + fakeGit = writeScriptBin "git" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeCargo = writeScriptBin "cargo" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeRustup = writeScriptBin "rustup" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeCross = writeScriptBin "cross" "echo ${version}"; + in + with pkgs; [ + cmake + which + lsb-release + zerokitPkg + nim-unwrapped-2_0 + fakeGit + fakeCargo + fakeRustup + fakeCross + ]; + + # Environment variables required for Android builds + ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}"; + ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}"; + NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; + XDG_CACHE_HOME = "/tmp"; + EXCLUDED_NIM_PACKAGES="vendor/nim-dnsdisc/vendor"; + + makeFlags = targets ++ [ + "V=${toString verbosity}" + "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}" + "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}" + "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}" + ]; + + configurePhase = '' + patchShebangs . vendor/nimbus-build-system > /dev/null + make nimbus-build-system-paths + ./nix/create-nimble-link.sh + ''; + + preBuild = '' + ln -s waku.nimble waku.nims + pushd vendor/nimbus-build-system/vendor/Nim + mkdir dist + cp -r ${callPackage ./nimble.nix {}} dist/nimble + chmod 777 -R dist/nimble + mkdir -p dist/nimble/dist + cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both + cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums + cp -r ${callPackage ./atlas.nix {}} dist/atlas + chmod 777 -R dist/atlas + mkdir dist/atlas/dist + cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat + cp -r ${callPackage ./sat.nix {}} dist/atlas/dist/sat + cp -r ${callPackage ./csources.nix {}} csources_v2 + chmod 777 -R dist/nimble csources_v2 + popd + mkdir -p vendor/zerokit/target/${androidArch}/release + cp ${zerokitPkg}/librln.so vendor/zerokit/target/${androidArch}/release/ + ''; + + installPhase = '' + mkdir -p $out/build/android + cp -r ./build/android/* $out/build/android/ + ''; + + meta = with pkgs.lib; { + description = "NWaku derivation to build libwaku for mobile targets using Android NDK and Rust."; + homepage = "https://github.com/status-im/nwaku"; + license = licenses.mit; + platforms = stableSystems; + }; +} diff --git a/nix/nimble.nix b/nix/nimble.nix new file mode 100644 index 000000000..5bd7b0f32 --- /dev/null +++ b/nix/nimble.nix @@ -0,0 +1,12 @@ +{ pkgs ? 
import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "nimble"; + rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-MVHf19UbOWk8Zba2scj06PxdYYOJA6OXrVyDQ9Ku6Us="; +} \ No newline at end of file diff --git a/nix/pkgs/android-sdk/compose.nix b/nix/pkgs/android-sdk/compose.nix new file mode 100644 index 000000000..c73aaee43 --- /dev/null +++ b/nix/pkgs/android-sdk/compose.nix @@ -0,0 +1,26 @@ +# +# This Nix expression centralizes the configuration +# for the Android development environment. +# + +{ androidenv, lib, stdenv }: + +assert lib.assertMsg (stdenv.system != "aarch64-darwin") + "aarch64-darwin not supported for Android SDK. Use: NIXPKGS_SYSTEM_OVERRIDE=x86_64-darwin"; + +# The "android-sdk-license" license is accepted +# by setting android_sdk.accept_license = true. +androidenv.composeAndroidPackages { + cmdLineToolsVersion = "9.0"; + toolsVersion = "26.1.1"; + platformToolsVersion = "33.0.3"; + buildToolsVersions = [ "34.0.0" ]; + platformVersions = [ "34" ]; + cmakeVersions = [ "3.22.1" ]; + ndkVersion = "25.2.9519653"; + includeNDK = true; + includeExtras = [ + "extras;android;m2repository" + "extras;google;m2repository" + ]; +} diff --git a/nix/pkgs/android-sdk/default.nix b/nix/pkgs/android-sdk/default.nix new file mode 100644 index 000000000..f3f795251 --- /dev/null +++ b/nix/pkgs/android-sdk/default.nix @@ -0,0 +1,14 @@ +# +# This Nix expression centralizes the configuration +# for the Android development environment. +# + +{ callPackage }: + +let + compose = callPackage ./compose.nix { }; + pkgs = callPackage ./pkgs.nix { inherit compose; }; + shell = callPackage ./shell.nix { androidPkgs = pkgs; }; +in { + inherit compose pkgs shell; +} diff --git a/nix/pkgs/android-sdk/pkgs.nix b/nix/pkgs/android-sdk/pkgs.nix new file mode 100644 index 000000000..645987b3a --- /dev/null +++ b/nix/pkgs/android-sdk/pkgs.nix @@ -0,0 +1,17 @@ +{ stdenv, compose }: + +# +# This derivation simply symlinks some stuff to get +# shorter paths as libexec/android-sdk is quite the mouthful. +# With this you can just do `androidPkgs.sdk` and `androidPkgs.ndk`. 
+# +stdenv.mkDerivation { + name = "${compose.androidsdk.name}-mod"; + phases = [ "symlinkPhase" ]; + outputs = [ "out" "sdk" "ndk" ]; + symlinkPhase = '' + ln -s ${compose.androidsdk} $out + ln -s ${compose.androidsdk}/libexec/android-sdk $sdk + ln -s ${compose.androidsdk}/libexec/android-sdk/ndk-bundle $ndk + ''; +} diff --git a/nix/pkgs/android-sdk/shell.nix b/nix/pkgs/android-sdk/shell.nix new file mode 100644 index 000000000..b5397763f --- /dev/null +++ b/nix/pkgs/android-sdk/shell.nix @@ -0,0 +1,19 @@ +{ mkShell, openjdk, androidPkgs }: + +mkShell { + name = "android-sdk-shell"; + buildInputs = [ openjdk ]; + + shellHook = '' + export ANDROID_HOME="${androidPkgs.sdk}" + export ANDROID_NDK_ROOT="${androidPkgs.ndk}" + export ANDROID_SDK_ROOT="$ANDROID_HOME" + export ANDROID_NDK_HOME="${androidPkgs.ndk}" + + export PATH="$ANDROID_NDK_ROOT:$PATH" + export PATH="$ANDROID_SDK_ROOT/tools:$PATH" + export PATH="$ANDROID_SDK_ROOT/tools/bin:$PATH" + export PATH="$(echo $ANDROID_SDK_ROOT/cmdline-tools/*/bin):$PATH" + export PATH="$ANDROID_SDK_ROOT/platform-tools:$PATH" + ''; +} diff --git a/nix/sat.nix b/nix/sat.nix new file mode 100644 index 000000000..31f264468 --- /dev/null +++ b/nix/sat.nix @@ -0,0 +1,12 @@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "sat"; + rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c="; +} \ No newline at end of file diff --git a/nix/shell.nix b/nix/shell.nix new file mode 100644 index 000000000..26086a26e --- /dev/null +++ b/nix/shell.nix @@ -0,0 +1,26 @@ +{ + pkgs ? import { }, +}: +let + optionalDarwinDeps = pkgs.lib.optionals pkgs.stdenv.isDarwin [ + pkgs.libiconv + pkgs.darwin.apple_sdk.frameworks.Security + ]; +in +pkgs.mkShell { + inputsFrom = [ + pkgs.androidShell + ] ++ optionalDarwinDeps; + + buildInputs = with pkgs; [ + git + cargo + rustup + cmake + nim-unwrapped-2_0 + ]; + + LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [ + pkgs.pcre + ]; +} diff --git a/nix/tools.nix b/nix/tools.nix new file mode 100644 index 000000000..108d38606 --- /dev/null +++ b/nix/tools.nix @@ -0,0 +1,15 @@ +{ pkgs ? import { } }: + +let + + inherit (pkgs.lib) fileContents last splitString flatten remove; + inherit (builtins) map match; +in { + findKeyValue = regex: sourceFile: + let + linesFrom = file: splitString "\n" (fileContents file); + matching = regex: lines: map (line: match regex line) lines; + extractMatch = matches: last (flatten (remove null matches)); + in + extractMatch (matching regex (linesFrom sourceFile)); +} diff --git a/scripts/generate_nimble_links.sh b/scripts/generate_nimble_links.sh new file mode 100755 index 000000000..238a22804 --- /dev/null +++ b/scripts/generate_nimble_links.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# This script is used for building Nix derivation which doesn't allow Git commands. +# It implements similar logic as $(NIMBLE_DIR) target in nimbus-build-system Makefile. 
+ +create_nimble_link_script_path="$(pwd)/${BUILD_SYSTEM_DIR}/scripts/create_nimble_link.sh" + +process_gitmodules() { + local gitmodules_file="$1" + local gitmodules_dir=$(dirname "$gitmodules_file") + + # Extract all submodule paths from the .gitmodules file + grep "path" $gitmodules_file | awk '{print $3}' | while read submodule_path; do + # Change pwd to the submodule dir and execute script + pushd "$gitmodules_dir/$submodule_path" > /dev/null + NIMBLE_DIR=$NIMBLE_DIR PWD_CMD=$PWD_CMD EXCLUDED_NIM_PACKAGES=$EXCLUDED_NIM_PACKAGES \ + "$create_nimble_link_script_path" "$submodule_path" + popd > /dev/null + done +} + +# Create the base directory if it doesn't exist +mkdir -p "${NIMBLE_DIR}/pkgs" + +# Find all .gitmodules files and process them +for gitmodules_file in $(find . -name '.gitmodules'); do + echo "Processing .gitmodules file: $gitmodules_file" + process_gitmodules "$gitmodules_file" +done \ No newline at end of file diff --git a/shell.nix b/shell.nix deleted file mode 100644 index ae2426a78..000000000 --- a/shell.nix +++ /dev/null @@ -1,22 +0,0 @@ -{ pkgs ? import (builtins.fetchTarball { - url = "https://github.com/NixOS/nixpkgs/archive/dbf1d73cd1a17276196afeee169b4cf7834b7a96.tar.gz"; - sha256 = "sha256:1k5nvn2yzw370cqsfh62lncsgydq2qkbjrx34cprzf0k6b93v7ch"; -}) {} }: - -pkgs.mkShell { - name = "nim-waku-build-shell"; - - # Versions dependent on nixpkgs commit. Update manually. - buildInputs = with pkgs; [ - git # 2.37.3 - which # 2.21 - rustc # 1.63.0 - ] ++ lib.optionals stdenv.isDarwin [ - libiconv - darwin.apple_sdk.frameworks.Security - ]; - - LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [ - pkgs.pcre - ]; -} From bbf9905f46c83ffa600e364bf6605103f31252a9 Mon Sep 17 00:00:00 2001 From: markoburcul Date: Wed, 2 Apr 2025 11:34:15 +0200 Subject: [PATCH 17/48] gitmodules: remove unused quic and ngtcp2 --- .gitmodules | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.gitmodules b/.gitmodules index 34a5b88e4..b7e52550a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -179,16 +179,6 @@ url = https://github.com/status-im/nim-minilru.git ignore = untracked branch = master -[submodule "vendor/nim-quic"] - path = vendor/nim-quic - url = https://github.com/status-im/nim-quic.git - ignore = untracked - branch = master -[submodule "vendor/nim-ngtcp2"] - path = vendor/nim-ngtcp2 - url = https://github.com/vacp2p/nim-ngtcp2.git - ignore = untracked - branch = master [submodule "vendor/waku-rlnv2-contract"] path = vendor/waku-rlnv2-contract url = https://github.com/waku-org/waku-rlnv2-contract.git From c43cee6593ff0e5180b7f10ed5e5ddc10d18d20f Mon Sep 17 00:00:00 2001 From: markoburcul Date: Wed, 9 Apr 2025 09:45:43 +0200 Subject: [PATCH 18/48] makefile: add nimbus-build-system-nimble-dir target Create a makefile target that runs a script which is a wrapper around nimbus-build-system create_nimble_link.sh script. Referenced issue: * https://github.com/waku-org/nwaku/issues/3232 --- Makefile | 10 +++- nix/create-nimble-link.sh | 82 -------------------------------- nix/default.nix | 3 +- scripts/generate_nimble_links.sh | 2 +- 4 files changed, 10 insertions(+), 87 deletions(-) delete mode 100755 nix/create-nimble-link.sh diff --git a/Makefile b/Makefile index 473bb7801..5eb893442 100644 --- a/Makefile +++ b/Makefile @@ -4,8 +4,8 @@ # - MIT license # at your option. This file may not be copied, modified, or distributed except # according to those terms. 
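For orientation, a brief sketch of how the new target is exercised by the Nix build (this mirrors the configurePhase of nix/default.nix as updated later in this patch; the commands run inside the derivation rather than as a user-facing workflow):

```sh
# Generate nimbus-build-system paths and the .nimble-link files without git,
# as done in the Nix derivation's configurePhase
make nimbus-build-system-paths
make nimbus-build-system-nimble-dir
```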
-BUILD_SYSTEM_DIR := vendor/nimbus-build-system -EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor +export BUILD_SYSTEM_DIR := vendor/nimbus-build-system +export EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor LINK_PCRE := 0 FORMAT_MSG := "\\x1B[95mFormatting:\\x1B[39m" # we don't want an error here, so we can handle things later, in the ".DEFAULT" target @@ -152,6 +152,12 @@ endif clean: | clean-libbacktrace +### Create nimble links (used when building with Nix) + +nimbus-build-system-nimble-dir: + NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \ + PWD_CMD="$(PWD)" \ + $(CURDIR)/scripts/generate_nimble_links.sh ################## ## RLN ## diff --git a/nix/create-nimble-link.sh b/nix/create-nimble-link.sh deleted file mode 100755 index 8d2bc77b3..000000000 --- a/nix/create-nimble-link.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash - -# This script generates `.nimble-link` files and the folder structure typically created by `make update` -# within this repository. It is an alternative to `vendor/nimbus-build-system/scripts/create_nimble_link.sh`, -# designed for execution inside a Nix derivation, where Git commands are not available. - -output_file="submodule_paths.txt" - -# The EXCLUDED_NIM_PACKAGES variable (defined in the Makefile) contains a colon-separated list of -# submodule paths that should be ignored. We split it into an array for easier matching. -IFS=':' read -ra EXCLUDED_PATTERNS <<< "$EXCLUDED_NIM_PACKAGES" - -# Function to check if a given submodule path should be excluded -should_exclude() { - local path="$1" - for pattern in "${EXCLUDED_PATTERNS[@]}"; do - if [[ "$path" == *"$pattern"* ]]; then - return 0 # Match found, exclude this submodule - fi - done - return 1 # No match, include this submodule -} - -# Locate all `.gitmodules` files and extract submodule paths -find . 
-name .gitmodules | while read -r gitmodules_file; do - module_dir=$(dirname "$(realpath "$gitmodules_file")") - - while IFS= read -r line; do - # Extract the submodule path from lines matching `path = /some/path` - if [[ $line =~ path[[:space:]]*=[[:space:]]*(.*) ]]; then - submodule_path="${BASH_REMATCH[1]}" - abs_path="$module_dir/$submodule_path" - - # Skip if the submodule is in the excluded list - if should_exclude "$abs_path"; then - continue - fi - - # If the submodule contains a `src/` folder, use it as the path - if [[ -d "$abs_path/src" ]]; then - abs_path="$abs_path/src" - fi - - echo "$abs_path" >> "$output_file" - fi - done < "$gitmodules_file" -done - -echo "Submodule paths collected in $output_file" - -# Directory where Nimble packages will be linked -nimble_pkgs_dir="./vendor/.nimble/pkgs" - -mkdir -p "$nimble_pkgs_dir" - -# Process each submodule path collected earlier -while IFS= read -r submodule_path; do - # Determine the submodule name from its path - if [[ "$submodule_path" == */src ]]; then - submodule_name=$(basename "$(dirname "$submodule_path")") - else - submodule_name=$(basename "$submodule_path") - fi - - # Check if the submodule contains at least one `.nimble` file - base_dir="${submodule_path%/src}" - nimble_files_count=$(find "$base_dir" -maxdepth 1 -type f -name "*.nimble" | wc -l) - - if [ "$nimble_files_count" -gt 0 ]; then - submodule_dir="$nimble_pkgs_dir/${submodule_name}-#head" - mkdir -p "$submodule_dir" - - nimble_link_file="$submodule_dir/${submodule_name}.nimble-link" - # `.nimble-link` files require two identical lines for Nimble to recognize them properly - echo "$submodule_path" > "$nimble_link_file" - echo "$submodule_path" >> "$nimble_link_file" - fi -done < "$output_file" - -echo "Nimble packages prepared in $nimble_pkgs_dir" - -rm "$output_file" diff --git a/nix/default.nix b/nix/default.nix index 5d598848d..a9d31b46d 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -62,7 +62,6 @@ in stdenv.mkDerivation rec { ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}"; NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; XDG_CACHE_HOME = "/tmp"; - EXCLUDED_NIM_PACKAGES="vendor/nim-dnsdisc/vendor"; makeFlags = targets ++ [ "V=${toString verbosity}" @@ -74,7 +73,7 @@ in stdenv.mkDerivation rec { configurePhase = '' patchShebangs . vendor/nimbus-build-system > /dev/null make nimbus-build-system-paths - ./nix/create-nimble-link.sh + make nimbus-build-system-nimble-dir ''; preBuild = '' diff --git a/scripts/generate_nimble_links.sh b/scripts/generate_nimble_links.sh index 238a22804..e01e6db46 100755 --- a/scripts/generate_nimble_links.sh +++ b/scripts/generate_nimble_links.sh @@ -26,4 +26,4 @@ mkdir -p "${NIMBLE_DIR}/pkgs" for gitmodules_file in $(find . 
-name '.gitmodules'); do echo "Processing .gitmodules file: $gitmodules_file" process_gitmodules "$gitmodules_file" -done \ No newline at end of file +done From e99762ddfef0ff5c74d401e710d333d43acb7c44 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 11 Apr 2025 11:05:22 +0200 Subject: [PATCH 19/48] chore: maintenance to c and c++ simple examples (#3367) --- examples/cbindings/README.md | 18 +++ examples/cbindings/waku_example.c | 71 ++++------ examples/cpp/README.md | 18 +++ examples/cpp/waku.cpp | 216 +++++++++++++++++++----------- library/libwaku.nim | 3 +- 5 files changed, 200 insertions(+), 126 deletions(-) create mode 100644 examples/cbindings/README.md create mode 100644 examples/cpp/README.md diff --git a/examples/cbindings/README.md b/examples/cbindings/README.md new file mode 100644 index 000000000..5465cf512 --- /dev/null +++ b/examples/cbindings/README.md @@ -0,0 +1,18 @@ +## App description +This is a very simple example that shows how to invoke libwaku functions from a C program. + +## Build +1. Open terminal +2. cd to nwaku root folder +3. make cwaku_example -j8 + +This will create libwaku.so and cwaku_example binary within the build folder. + +## Run +1. Open terminal +2. cd to nwaku root folder +3. export LD_LIBRARY_PATH=build +4. `./build/cwaku_example --host=0.0.0.0 --port=60001` + +Use `./build/cwaku_example --help` to see some other options. + diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c index bbb76c862..b80b9af8f 100644 --- a/examples/cbindings/waku_example.c +++ b/examples/cbindings/waku_example.c @@ -14,7 +14,6 @@ #include "base64.h" #include "../../library/libwaku.h" - // Shared synchronization variables pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; @@ -29,7 +28,6 @@ void waitForCallback() { pthread_mutex_unlock(&mutex); } - #define WAKU_CALL(call) \ do { \ int ret = call; \ @@ -107,6 +105,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { return 0; } +void signal_cond() { + pthread_mutex_lock(&mutex); + callback_executed = 1; + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); +} + static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; void event_handler(int callerRet, const char* msg, size_t len, void* userData) { @@ -118,10 +123,7 @@ void event_handler(int callerRet, const char* msg, size_t len, void* userData) { printf("Receiving event: %s\n", msg); } - pthread_mutex_lock(&mutex); - callback_executed = 1; - pthread_cond_signal(&cond); - pthread_mutex_unlock(&mutex); + signal_cond(); } void on_event_received(int callerRet, const char* msg, size_t len, void* userData) { @@ -142,6 +144,7 @@ void handle_content_topic(int callerRet, const char* msg, size_t len, void* user contentTopic = malloc(len * sizeof(char) + 1); strcpy(contentTopic, msg); + signal_cond(); } char* publishResponse = NULL; @@ -158,33 +161,30 @@ void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userDat #define MAX_MSG_SIZE 65535 -void publish_message(char* pubsubTopic, const char* msg) { +void publish_message(const char* msg) { char jsonWakuMsg[MAX_MSG_SIZE]; char *msgPayload = b64_encode(msg, strlen(msg)); - WAKU_CALL( waku_content_topic(RET_OK, + WAKU_CALL( waku_content_topic(ctx, "appName", 1, "contentTopicName", "encoding", handle_content_topic, userData) ); - snprintf(jsonWakuMsg, MAX_MSG_SIZE, - "{\"payload\":\"%s\",\"content_topic\":\"%s\"}", + 
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", msgPayload, contentTopic); free(msgPayload); - WAKU_CALL( waku_relay_publish(&ctx, - pubsubTopic, + WAKU_CALL( waku_relay_publish(ctx, + "/waku/2/rs/16/32", jsonWakuMsg, 10000 /*timeout ms*/, event_handler, userData) ); - - printf("waku relay response [%s]\n", publishResponse); } void show_help_and_exit() { @@ -194,20 +194,12 @@ void show_help_and_exit() { void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) { printf("Default pubsub topic: %s\n", msg); - - pthread_mutex_lock(&mutex); - callback_executed = 1; - pthread_cond_signal(&cond); - pthread_mutex_unlock(&mutex); + signal_cond(); } void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) { printf("Git Version: %s\n", msg); - - pthread_mutex_lock(&mutex); - callback_executed = 1; - pthread_cond_signal(&cond); - pthread_mutex_unlock(&mutex); + signal_cond(); } // Beginning of UI program logic @@ -236,9 +228,6 @@ void handle_user_input() { return; } - int c; - while ( (c = getchar()) != '\n' && c != EOF ) { } - switch (atoi(cmd)) { case SUBSCRIBE_TOPIC_MENU: @@ -247,7 +236,7 @@ void handle_user_input() { char pubsubTopic[128]; scanf("%127s", pubsubTopic); - WAKU_CALL( waku_relay_subscribe(&ctx, + WAKU_CALL( waku_relay_subscribe(ctx, pubsubTopic, event_handler, userData) ); @@ -262,21 +251,17 @@ void handle_user_input() { printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); char peerAddr[512]; scanf("%511s", peerAddr); - WAKU_CALL(waku_connect(&ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData)); + WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData)); show_main_menu(); break; case PUBLISH_MESSAGE_MENU: { - printf("Indicate the Pubsubtopic:\n"); - char pubsubTopic[128]; - scanf("%127s", pubsubTopic); - - printf("Type the message tp publish:\n"); + printf("Type the message to publish:\n"); char msg[1024]; scanf("%1023s", msg); - publish_message(pubsubTopic, msg); + publish_message(msg); show_main_menu(); } @@ -311,24 +296,24 @@ int main(int argc, char** argv) { char jsonConfig[5000]; snprintf(jsonConfig, 5000, "{ \ + \"clusterId\": 16, \ + \"shards\": [ 1, 32, 64, 128, 256 ], \ \"listenAddress\": \"%s\", \ \"tcpPort\": %d, \ - \"nodekey\": \"%s\", \ \"relay\": %s, \ \"store\": %s, \ \"storeMessageDbUrl\": \"%s\", \ \"storeMessageRetentionPolicy\": \"%s\", \ \"storeMaxNumDbConnections\": %d , \ - \"logLevel\": \"DEBUG\", \ + \"logLevel\": \"FATAL\", \ \"discv5Discovery\": true, \ \"discv5BootstrapNodes\": \ [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \ \"discv5UdpPort\": 9999, \ - \"dnsDiscoveryUrl\": \"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im\", \ + 
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ }", cfgNode.host, cfgNode.port, - cfgNode.key, cfgNode.relay ? "true":"false", cfgNode.store ? "true":"false", cfgNode.storeDbUrl, @@ -351,14 +336,6 @@ int main(int argc, char** argv) { WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) ); - printf("Establishing connection with: %s\n", cfgNode.peers); - - WAKU_CALL( waku_connect(ctx, - cfgNode.peers, - 10000 /* timeoutMs */, - event_handler, - userData) ); - WAKU_CALL( waku_relay_subscribe(ctx, "/waku/2/rs/0/0", event_handler, diff --git a/examples/cpp/README.md b/examples/cpp/README.md new file mode 100644 index 000000000..fa8d246e0 --- /dev/null +++ b/examples/cpp/README.md @@ -0,0 +1,18 @@ +## App description +This is a very simple example that shows how to invoke libwaku functions from a C++ program. + +## Build +1. Open terminal +2. cd to nwaku root folder +3. make cppwaku_example -j8 + +This will create libwaku.so and cppwaku_example binary within the build folder. + +## Run +1. Open terminal +2. cd to nwaku root folder +3. export LD_LIBRARY_PATH=build +4. `./build/cppwaku_example --host=0.0.0.0 --port=60001` + +Use `./build/cppwaku_example --help` to see some other options. + diff --git a/examples/cpp/waku.cpp b/examples/cpp/waku.cpp index 4b601c492..c47877d02 100644 --- a/examples/cpp/waku.cpp +++ b/examples/cpp/waku.cpp @@ -16,12 +16,34 @@ #include "base64.h" #include "../../library/libwaku.h" +// Shared synchronization variables +pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cond = PTHREAD_COND_INITIALIZER; +int callback_executed = 0; + +void waitForCallback() { + pthread_mutex_lock(&mutex); + while (!callback_executed) { + pthread_cond_wait(&cond, &mutex); + } + callback_executed = 0; + pthread_mutex_unlock(&mutex); +} + +void signal_cond() { + pthread_mutex_lock(&mutex); + callback_executed = 1; + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); +} + #define WAKU_CALL(call) \ do { \ int ret = call; \ if (ret != 0) { \ std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \ } \ + waitForCallback(); \ } while (0) struct ConfigNode { @@ -78,6 +100,24 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { return 0; } +void event_handler(const char* msg, size_t len) { + printf("Receiving event: %s\n", msg); +} + +void handle_error(const char* msg, size_t len) { + printf("handle_error: %s\n", msg); + exit(1); +} + +template +auto cify(F&& f) { + static F fn = std::forward(f); + return [](int callerRet, const char* msg, size_t len, void* userData) { + signal_cond(); + return fn(msg, len); + }; +} + static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; // Beginning of UI program logic @@ -98,7 +138,7 @@ void show_main_menu() { printf("\t3.) 
Publish a message\n"); } -void handle_user_input() { +void handle_user_input(void* ctx) { char cmd[1024]; memset(cmd, 0, 1024); int numRead = read(0, cmd, 1024); @@ -106,9 +146,6 @@ void handle_user_input() { return; } - int c; - while ( (c = getchar()) != '\n' && c != EOF ) { } - switch (atoi(cmd)) { case SUBSCRIBE_TOPIC_MENU: @@ -116,10 +153,14 @@ void handle_user_input() { printf("Indicate the Pubsubtopic to subscribe:\n"); char pubsubTopic[128]; scanf("%127s", pubsubTopic); - // if (!waku_relay_subscribe(pubsubTopic, &mResp)) { - // printf("Error subscribing to PubsubTopic: %s\n", mResp->data); - // } - // printf("Waku Relay subscription response: %s\n", mResp->data); + + WAKU_CALL( waku_relay_subscribe(ctx, + pubsubTopic, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); + printf("The subscription went well\n"); show_main_menu(); } @@ -130,41 +171,51 @@ void handle_user_input() { printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); char peerAddr[512]; scanf("%511s", peerAddr); - // if (!waku_connect(peerAddr, 10000 /* timeoutMs */, &mResp)) { - // printf("Couldn't connect to the remote peer: %s\n", mResp->data); - // } + WAKU_CALL( waku_connect(ctx, + peerAddr, + 10000 /* timeoutMs */, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr)); show_main_menu(); break; case PUBLISH_MESSAGE_MENU: { - printf("Indicate the Pubsubtopic:\n"); - char pubsubTopic[128]; - scanf("%127s", pubsubTopic); - - printf("Type the message tp publish:\n"); + printf("Type the message to publish:\n"); char msg[1024]; scanf("%1023s", msg); - char jsonWakuMsg[1024]; + char jsonWakuMsg[2048]; std::vector msgPayload; b64_encode(msg, strlen(msg), msgPayload); - // waku_content_topic("appName", - // 1, - // "contentTopicName", - // "encoding", - // &mResp); + std::string contentTopic; + waku_content_topic(ctx, + "appName", + 1, + "contentTopicName", + "encoding", + cify([&contentTopic](const char* msg, size_t len) { + contentTopic = msg; + }), + nullptr); - // snprintf(jsonWakuMsg, - // 1024, - // "{\"payload\":\"%s\",\"content_topic\":\"%s\"}", - // msgPayload, mResp->data); + snprintf(jsonWakuMsg, + 2048, + "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", + msgPayload.data(), contentTopic.c_str()); - // free(msgPayload); + WAKU_CALL( waku_relay_publish(ctx, + "/waku/2/rs/16/32", + jsonWakuMsg, + 10000 /*timeout ms*/, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); - // waku_relay_publish(pubsubTopic, jsonWakuMsg, 10000 /*timeout ms*/, &mResp); - // printf("waku relay response [%s]\n", mResp->data); show_main_menu(); } break; @@ -181,23 +232,6 @@ void show_help_and_exit() { exit(1); } -void event_handler(const char* msg, size_t len) { - printf("Receiving message %s\n", msg); -} - -void handle_error(const char* msg, size_t len) { - printf("Error: %s\n", msg); - exit(1); -} - -template -auto cify(F&& f) { - static F fn = std::forward(f); - return [](const char* msg, size_t len) { - return fn(msg, len); - }; -} - int main(int argc, char** argv) { struct ConfigNode cfgNode; // default values @@ -212,60 +246,86 @@ int main(int argc, char** argv) { show_help_and_exit(); } - char jsonConfig[1024]; - snprintf(jsonConfig, 1024, "{ \ + char jsonConfig[2048]; + snprintf(jsonConfig, 2048, "{ \ \"host\": \"%s\", \ \"port\": %d, \ - \"key\": \"%s\", \ - \"relay\": %s, \ - \"logLevel\": \"DEBUG\" \ + \"relay\": true, \ + \"clusterId\": 16, \ + \"shards\": [ 1, 32, 64, 
128, 256 ], \ + \"logLevel\": \"FATAL\", \ + \"discv5Discovery\": true, \ + \"discv5BootstrapNodes\": \ + [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \ + \"discv5UdpPort\": 9999, \ + \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ + \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ }", cfgNode.host, - cfgNode.port, - cfgNode.key, - cfgNode.relay ? "true":"false"); + cfgNode.port); - WAKU_CALL(waku_new(jsonConfig, cify([](const char* msg, size_t len) { - std::cout << "Error: " << msg << std::endl; - exit(1); - }))); + void* ctx = + waku_new(jsonConfig, + cify([](const char* msg, size_t len) { + std::cout << "waku_new feedback: " << msg << std::endl; + } + ), + nullptr + ); + waitForCallback(); // example on how to retrieve a value from the `libwaku` callback. std::string defaultPubsubTopic; - WAKU_CALL(waku_default_pubsub_topic(cify([&defaultPubsubTopic](const char* msg, size_t len) { - defaultPubsubTopic = msg; - }))); + WAKU_CALL( + waku_default_pubsub_topic( + ctx, + cify([&defaultPubsubTopic](const char* msg, size_t len) { + defaultPubsubTopic = msg; + } + ), + nullptr)); std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl; - WAKU_CALL(waku_version(cify([&](const char* msg, size_t len) { - std::cout << "Git Version: " << msg << std::endl; - }))); + WAKU_CALL(waku_version(ctx, + cify([&](const char* msg, size_t len) { + std::cout << "Git Version: " << msg << std::endl; + }), + nullptr)); printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? 
"YES": "NO"); std::string pubsubTopic; - WAKU_CALL(waku_pubsub_topic("example", cify([&](const char* msg, size_t len) { - pubsubTopic = msg; - }))); + WAKU_CALL(waku_pubsub_topic(ctx, + "example", + cify([&](const char* msg, size_t len) { + pubsubTopic = msg; + }), + nullptr)); std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl; - waku_set_event_callback(event_handler); - waku_start(); + waku_set_event_callback(ctx, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr); - WAKU_CALL( waku_connect(cfgNode.peers, - 10000 /* timeoutMs */, - handle_error) ); + WAKU_CALL( waku_start(ctx, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr)); - WAKU_CALL( waku_relay_subscribe(defaultPubsubTopic.c_str(), - handle_error) ); - - std::cout << "Establishing connection with: " << cfgNode.peers << std::endl; - WAKU_CALL(waku_connect(cfgNode.peers, 10000 /* timeoutMs */, handle_error)); + WAKU_CALL( waku_relay_subscribe(ctx, + defaultPubsubTopic.c_str(), + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); show_main_menu(); while(1) { - handle_user_input(); + handle_user_input(ctx); } } diff --git a/library/libwaku.nim b/library/libwaku.nim index 23600aca4..050395bc5 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -42,7 +42,8 @@ import template checkLibwakuParams*( ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer ) = - ctx[].userData = userData + if not isNil(ctx): + ctx[].userData = userData if isNil(callback): return RET_MISSING_CALLBACK From 001456cda073c3065697af2494d091d6870e7330 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Fri, 11 Apr 2025 12:07:35 +0300 Subject: [PATCH 20/48] chore: expect camelCase JSON for libwaku store queries (#3366) --- .../requests/protocols/store_request.nim | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim index aa4071fcf..57786a581 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim @@ -24,50 +24,49 @@ func fromJsonNode( T: type StoreRequest, jsonContent: JsonNode ): Result[StoreQueryRequest, string] = var contentTopics: seq[string] - if jsonContent.contains("content_topics"): + if jsonContent.contains("contentTopics"): contentTopics = collect(newSeq): - for cTopic in jsonContent["content_topics"].getElems(): + for cTopic in jsonContent["contentTopics"].getElems(): cTopic.getStr() var msgHashes: seq[WakuMessageHash] - if jsonContent.contains("message_hashes"): - for hashJsonObj in jsonContent["message_hashes"].getElems(): + if jsonContent.contains("messageHashes"): + for hashJsonObj in jsonContent["messageHashes"].getElems(): let hash = hashJsonObj.getStr().hexToHash().valueOr: return err("Failed converting message hash hex string to bytes: " & error) msgHashes.add(hash) let pubsubTopic = - if jsonContent.contains("pubsub_topic"): - some(jsonContent["pubsub_topic"].getStr()) + if jsonContent.contains("pubsubTopic"): + some(jsonContent["pubsubTopic"].getStr()) else: none(string) let paginationCursor = - if jsonContent.contains("pagination_cursor"): - let hash = jsonContent["pagination_cursor"].getStr().hexToHash().valueOr: - return - err("Failed 
converting pagination_cursor hex string to bytes: " & error) + if jsonContent.contains("paginationCursor"): + let hash = jsonContent["paginationCursor"].getStr().hexToHash().valueOr: + return err("Failed converting paginationCursor hex string to bytes: " & error) some(hash) else: none(WakuMessageHash) - let paginationForwardBool = jsonContent["pagination_forward"].getBool() + let paginationForwardBool = jsonContent["paginationForward"].getBool() let paginationForward = if paginationForwardBool: PagingDirection.FORWARD else: PagingDirection.BACKWARD let paginationLimit = - if jsonContent.contains("pagination_limit"): - some(uint64(jsonContent["pagination_limit"].getInt())) + if jsonContent.contains("paginationLimit"): + some(uint64(jsonContent["paginationLimit"].getInt())) else: none(uint64) - let startTime = ?jsonContent.getProtoInt64("time_start") - let endTime = ?jsonContent.getProtoInt64("time_end") + let startTime = ?jsonContent.getProtoInt64("timeStart") + let endTime = ?jsonContent.getProtoInt64("timeEnd") return ok( StoreQueryRequest( - requestId: jsonContent["request_id"].getStr(), - includeData: jsonContent["include_data"].getBool(), + requestId: jsonContent["requestId"].getStr(), + includeData: jsonContent["includeData"].getBool(), pubsubTopic: pubsubTopic, contentTopics: contentTopics, startTime: startTime, From ed0474ade326191c4e1c9cebd94171adbe605d0c Mon Sep 17 00:00:00 2001 From: Miran Date: Fri, 11 Apr 2025 17:20:23 +0200 Subject: [PATCH 21/48] chore: fix unused and deprecated imports (#3368) --- tests/common/test_base64_codec.nim | 2 +- tests/common/test_confutils_envvar.nim | 2 +- tests/common/test_enr_builder.nim | 2 +- tests/common/test_parse_size.nim | 2 +- tests/common/test_ratelimit_setting.nim | 2 +- tests/common/test_requestratelimiter.nim | 2 +- tests/common/test_sqlite_migrations.nim | 2 +- .../incentivization/test_poc_eligibility.nim | 12 ++--------- tests/incentivization/test_poc_reputation.nim | 15 ++----------- tests/node/peer_manager/peer_store/utils.nim | 2 +- tests/node/test_wakunode_filter.nim | 1 - tests/node/test_wakunode_legacy_lightpush.nim | 13 +++--------- tests/node/test_wakunode_legacy_store.nim | 5 +---- tests/node/test_wakunode_lightpush.nim | 19 ++++------------- tests/node/test_wakunode_peer_exchange.nim | 10 ++------- tests/node/test_wakunode_peer_manager.nim | 7 ++----- tests/node/test_wakunode_store.nim | 7 ++----- tests/test_message_cache.nim | 2 +- tests/test_peer_manager.nim | 3 +-- tests/test_peer_store_extended.nim | 8 ++----- tests/test_relay_peer_exchange.nim | 2 -- tests/test_waku_dnsdisc.nim | 3 ++- tests/test_waku_enr.nim | 2 +- tests/test_waku_keepalive.nim | 1 - tests/test_waku_noise_sessions.nim | 2 +- tests/test_wakunode.nim | 4 +--- tests/testlib/simple_mock.nim | 4 ++-- tests/testlib/wakucore.nim | 3 ++- tests/testlib/wakunode.nim | 2 +- tests/waku_archive/archive_utils.nim | 3 +-- tests/waku_archive/test_driver_queue.nim | 2 +- .../waku_archive/test_driver_queue_index.nim | 2 +- .../test_driver_queue_pagination.nim | 1 - tests/waku_archive/test_driver_sqlite.nim | 7 +------ .../waku_archive/test_driver_sqlite_query.nim | 8 +------ tests/waku_archive/test_retention_policy.nim | 4 +--- tests/waku_archive/test_waku_archive.nim | 2 -- .../waku_archive_legacy/test_driver_queue.nim | 2 +- .../test_driver_sqlite.nim | 2 -- .../test_driver_sqlite_query.nim | 2 -- .../waku_archive_legacy/test_waku_archive.nim | 10 +-------- tests/waku_core/test_peers.nim | 2 +- tests/waku_discv5/test_waku_discv5.nim | 2 +- tests/waku_discv5/utils.nim 
| 1 - tests/waku_enr/utils.nim | 6 ++---- tests/waku_filter_v2/test_waku_client.nim | 9 +------- .../test_waku_filter_dos_protection.nim | 19 ++++++----------- tests/waku_filter_v2/waku_filter_utils.nim | 2 +- tests/waku_lightpush/lightpush_utils.nim | 2 +- tests/waku_lightpush/test_client.nim | 9 ++------ tests/waku_lightpush/test_ratelimit.nim | 21 ++++--------------- .../waku_lightpush_legacy/lightpush_utils.nim | 3 +-- tests/waku_lightpush_legacy/test_client.nim | 10 ++------- .../waku_lightpush_legacy/test_ratelimit.nim | 16 +++----------- tests/waku_peer_exchange/test_protocol.nim | 10 +++------ tests/waku_relay/test_message_id.nim | 5 +++-- tests/waku_relay/test_protocol.nim | 7 +++---- tests/waku_relay/utils.nim | 4 +--- .../test_rln_group_manager_onchain.nim | 13 +++++------- .../test_rln_group_manager_static.nim | 2 +- tests/waku_rln_relay/test_rln_serde.nim | 2 +- tests/waku_rln_relay/test_waku_rln_relay.nim | 1 - tests/waku_rln_relay/utils.nim | 2 +- tests/waku_rln_relay/utils_onchain.nim | 5 +++-- tests/waku_store/store_utils.nim | 5 ++--- tests/waku_store/test_client.nim | 4 ++-- tests/waku_store/test_waku_store.nim | 4 ++-- tests/waku_store/test_wakunode_store.nim | 2 -- tests/waku_store_legacy/store_utils.nim | 2 +- tests/waku_store_legacy/test_client.nim | 4 ++-- tests/waku_store_legacy/test_rpc_codec.nim | 1 - tests/waku_store_legacy/test_waku_store.nim | 4 ++-- .../waku_store_legacy/test_wakunode_store.nim | 1 - tests/waku_store_sync/sync_utils.nim | 2 +- tests/waku_store_sync/test_protocol.nim | 6 +----- tests/waku_store_sync/test_storage.nim | 1 - tests/wakunode_rest/test_rest_admin.nim | 2 +- tests/wakunode_rest/test_rest_cors.nim | 3 --- .../wakunode_rest/test_rest_debug_serdes.nim | 2 +- tests/wakunode_rest/test_rest_filter.nim | 1 - tests/wakunode_rest/test_rest_health.nim | 1 - .../test_rest_lightpush_legacy.nim | 3 +-- .../wakunode_rest/test_rest_relay_serdes.nim | 2 +- tests/wakunode_rest/test_rest_serdes.nim | 2 +- waku.nimble | 1 + waku/common/databases/db_postgres/dbconn.nim | 3 +-- waku/factory/builder.nim | 1 - waku/factory/waku.nim | 1 - waku/incentivization/common.nim | 2 +- waku/incentivization/eligibility_manager.nim | 2 +- waku/incentivization/rpc_codec.nim | 2 +- waku/node/waku_node.nim | 5 +---- waku/waku_api/rest/admin/client.nim | 9 ++------ waku/waku_api/rest/debug/client.nim | 2 +- waku/waku_api/rest/filter/client.nim | 10 +-------- waku/waku_api/rest/health/client.nim | 5 ++--- .../waku_api/rest/legacy_lightpush/client.nim | 12 ++--------- waku/waku_api/rest/relay/client.nim | 10 ++------- .../driver/queue_driver/index.nim | 1 - waku/waku_filter_v2/subscriptions.nim | 7 +------ waku/waku_lightpush/callbacks.nim | 3 +-- waku/waku_lightpush/common.nim | 2 -- waku/waku_lightpush/self_req_handler.nim | 11 ++-------- waku/waku_noise/noise_types.nim | 2 +- waku/waku_noise/noise_utils.nim | 5 +++-- 105 files changed, 136 insertions(+), 351 deletions(-) diff --git a/tests/common/test_base64_codec.nim b/tests/common/test_base64_codec.nim index fd3b23c76..1c2d04c45 100644 --- a/tests/common/test_base64_codec.nim +++ b/tests/common/test_base64_codec.nim @@ -1,6 +1,6 @@ {.used.} -import std/strutils, stew/[results, byteutils], testutils/unittests +import std/strutils, results, stew/byteutils, testutils/unittests import waku/common/base64 suite "Waku Common - stew base64 wrapper": diff --git a/tests/common/test_confutils_envvar.nim b/tests/common/test_confutils_envvar.nim index 6916f52a8..676a35ae1 100644 --- a/tests/common/test_confutils_envvar.nim +++ 
b/tests/common/test_confutils_envvar.nim @@ -2,7 +2,7 @@ import std/[os, options], - stew/results, + results, stew/shims/net as stewNet, testutils/unittests, confutils, diff --git a/tests/common/test_enr_builder.nim b/tests/common/test_enr_builder.nim index b95828bb4..9fe8f6807 100644 --- a/tests/common/test_enr_builder.nim +++ b/tests/common/test_enr_builder.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, stew/results, stew/shims/net, testutils/unittests +import std/options, results, stew/shims/net, testutils/unittests import waku/common/enr, ../testlib/wakucore suite "nim-eth ENR - builder and typed record": diff --git a/tests/common/test_parse_size.nim b/tests/common/test_parse_size.nim index dd1f2c7af..009cb9637 100644 --- a/tests/common/test_parse_size.nim +++ b/tests/common/test_parse_size.nim @@ -1,6 +1,6 @@ {.used.} -import testutils/unittests, stew/results +import testutils/unittests, results import waku/common/utils/parse_size_units suite "Size serialization test": diff --git a/tests/common/test_ratelimit_setting.nim b/tests/common/test_ratelimit_setting.nim index 6f6ac8d38..97d69e06a 100644 --- a/tests/common/test_ratelimit_setting.nim +++ b/tests/common/test_ratelimit_setting.nim @@ -10,7 +10,7 @@ import testutils/unittests import chronos, libp2p/stream/connection -import std/[sequtils, options, tables] +import std/[options, tables] import ../../waku/common/rate_limit/request_limiter import ../../waku/common/rate_limit/timed_map diff --git a/tests/common/test_requestratelimiter.nim b/tests/common/test_requestratelimiter.nim index 0b494c1be..be910b38e 100644 --- a/tests/common/test_requestratelimiter.nim +++ b/tests/common/test_requestratelimiter.nim @@ -10,7 +10,7 @@ import testutils/unittests import chronos, libp2p/stream/connection -import std/[sequtils, options] +import std/options import ../../waku/common/rate_limit/request_limiter import ../../waku/common/rate_limit/timed_map diff --git a/tests/common/test_sqlite_migrations.nim b/tests/common/test_sqlite_migrations.nim index 58708ce21..9e67fb9c8 100644 --- a/tests/common/test_sqlite_migrations.nim +++ b/tests/common/test_sqlite_migrations.nim @@ -1,6 +1,6 @@ {.used.} -import std/[strutils, os], stew/results, testutils/unittests +import std/[strutils, os], results, testutils/unittests import waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils template sourceDir(): string = diff --git a/tests/incentivization/test_poc_eligibility.nim b/tests/incentivization/test_poc_eligibility.nim index 7490c2304..be9018898 100644 --- a/tests/incentivization/test_poc_eligibility.nim +++ b/tests/incentivization/test_poc_eligibility.nim @@ -1,17 +1,9 @@ {.used.} -import - std/options, - testutils/unittests, - chronos, - web3, - stew/byteutils, - stint, - strutils, - tests/testlib/testasync +import std/options, testutils/unittests, chronos, web3, stint, tests/testlib/testasync import - waku/[node/peer_manager, waku_core], + waku/node/peer_manager, waku/incentivization/[rpc, eligibility_manager], ../waku_rln_relay/[utils_onchain, utils] diff --git a/tests/incentivization/test_poc_reputation.nim b/tests/incentivization/test_poc_reputation.nim index d601d1e24..0547b9744 100644 --- a/tests/incentivization/test_poc_reputation.nim +++ b/tests/incentivization/test_poc_reputation.nim @@ -1,17 +1,6 @@ -import - std/options, - testutils/unittests, - chronos, - web3, - stew/byteutils, - stint, - strutils, - tests/testlib/testasync +import std/options, testutils/unittests, chronos, web3 -import - waku/[node/peer_manager, waku_core], - 
waku/incentivization/[rpc, reputation_manager], - waku/waku_lightpush_legacy/rpc +import waku/incentivization/reputation_manager, waku/waku_lightpush_legacy/rpc suite "Waku Incentivization PoC Reputation": var manager {.threadvar.}: ReputationManager diff --git a/tests/node/peer_manager/peer_store/utils.nim b/tests/node/peer_manager/peer_store/utils.nim index b087dc471..891c5fdab 100644 --- a/tests/node/peer_manager/peer_store/utils.nim +++ b/tests/node/peer_manager/peer_store/utils.nim @@ -1,4 +1,4 @@ -import std/options, stew/results, libp2p/peerstore +import std/options, results import waku/node/peer_manager/[waku_peer_store, peer_store/waku_peer_storage], diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim index c9ea12f17..83c486a7e 100644 --- a/tests/node/test_wakunode_filter.nim +++ b/tests/node/test_wakunode_filter.nim @@ -6,7 +6,6 @@ import testutils/unittests, chronos, chronicles, - os, libp2p/[peerstore, crypto/crypto] import diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index ab23921a0..4ff9c7f00 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -1,31 +1,24 @@ {.used.} import - std/[options, tables, sequtils, tempfiles, strutils], + std/[options, tempfiles], stew/shims/net as stewNet, testutils/unittests, chronos, - chronicles, std/strformat, - os, - libp2p/[peerstore, crypto/crypto] + libp2p/crypto/crypto import waku/[ waku_core, node/peer_manager, node/waku_node, - waku_filter_v2, - waku_filter_v2/client, - waku_filter_v2/subscriptions, waku_lightpush_legacy, waku_lightpush_legacy/common, - waku_lightpush_legacy/client, waku_lightpush_legacy/protocol_metrics, - waku_lightpush_legacy/rpc, waku_rln_relay, ], - ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils], + ../testlib/[wakucore, wakunode, testasync, futures], ../resources/payloads suite "Waku Legacy Lightpush - End To End": diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim index 5b0409d86..8ede3f6f2 100644 --- a/tests/node/test_wakunode_legacy_store.nim +++ b/tests/node/test_wakunode_legacy_store.nim @@ -14,14 +14,11 @@ import node/peer_manager, waku_core, waku_store_legacy, - waku_store_legacy/client, waku_archive_legacy, - waku_archive_legacy/driver/sqlite_driver, - common/databases/db_sqlite, ], ../waku_store_legacy/store_utils, ../waku_archive_legacy/archive_utils, - ../testlib/[common, wakucore, wakunode, testasync, futures, testutils] + ../testlib/[wakucore, wakunode, testasync, testutils] suite "Waku Store - End to End - Sorted Archive": var pubsubTopic {.threadvar.}: PubsubTopic diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 865fb38ff..2e785e368 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -1,27 +1,16 @@ {.used.} import - std/[options, tables, sequtils, tempfiles, strutils], + std/[options, tempfiles], stew/shims/net as stewNet, testutils/unittests, chronos, - chronicles, std/strformat, - os, - libp2p/[peerstore, crypto/crypto] + libp2p/crypto/crypto import - waku/[ - waku_core, - node/peer_manager, - node/waku_node, - waku_filter_v2, - waku_filter_v2/client, - waku_filter_v2/subscriptions, - waku_lightpush, - waku_rln_relay, - ], - ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils], + waku/[waku_core, node/peer_manager, node/waku_node, 
waku_lightpush, waku_rln_relay], + ../testlib/[wakucore, wakunode, testasync, futures], ../resources/payloads const PublishedToOnePeer = 1 diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim index afd808a2c..26837869d 100644 --- a/tests/node/test_wakunode_peer_exchange.nim +++ b/tests/node/test_wakunode_peer_exchange.nim @@ -13,14 +13,8 @@ import eth/p2p/discoveryv5/enr import - waku/[ - waku_node, - discovery/waku_discv5, - waku_peer_exchange, - node/peer_manager, - waku_relay/protocol, - waku_core, - ], + waku/ + [waku_node, discovery/waku_discv5, waku_peer_exchange, node/peer_manager, waku_core], ../waku_peer_exchange/utils, ../testlib/[wakucore, wakunode, testasync] diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim index 6b8fb2fa6..e37b3e108 100644 --- a/tests/node/test_wakunode_peer_manager.nim +++ b/tests/node/test_wakunode_peer_manager.nim @@ -18,18 +18,15 @@ import waku_core, node/peer_manager, node/waku_node, - waku_enr/sharding, discovery/waku_discv5, waku_filter_v2/common, waku_relay/protocol, ], - ../testlib/ - [wakucore, wakunode, testasync, testutils, assertions, comparisons, futures], + ../testlib/[wakucore, wakunode, testasync, testutils, comparisons], ../waku_enr/utils, ../waku_archive/archive_utils, ../waku_discv5/utils, - ./peer_manager/peer_store/utils, - ./utils + ./peer_manager/peer_store/utils const DEFAULT_PROTOCOLS: seq[string] = @["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"] diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim index 49c24c6d8..622322d92 100644 --- a/tests/node/test_wakunode_store.nim +++ b/tests/node/test_wakunode_store.nim @@ -1,7 +1,7 @@ {.used.} import - std/[options, sequtils, algorithm, sets], + std/[options, sequtils, sets], stew/shims/net as stewNet, testutils/unittests, chronos, @@ -15,14 +15,11 @@ import waku_core, waku_core/message/digest, waku_store, - waku_store/client, waku_archive, - waku_archive/driver/sqlite_driver, - common/databases/db_sqlite, ], ../waku_store/store_utils, ../waku_archive/archive_utils, - ../testlib/[common, wakucore, wakunode, testasync, futures, testutils] + ../testlib/[wakucore, wakunode, testasync, testutils] suite "Waku Store - End to End - Sorted Archive": var pubsubTopic {.threadvar.}: PubsubTopic diff --git a/tests/test_message_cache.nim b/tests/test_message_cache.nim index b6bb91b86..cd2e882c1 100644 --- a/tests/test_message_cache.nim +++ b/tests/test_message_cache.nim @@ -1,6 +1,6 @@ {.used.} -import std/[sets, random], stew/[results, byteutils], testutils/unittests +import std/[sets, random], results, stew/byteutils, testutils/unittests import waku/waku_core, waku/waku_api/message_cache, ./testlib/wakucore randomize() diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index 4ca08e46f..d79c6b991 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -1,7 +1,7 @@ {.used.} import - std/[options, sequtils, times, sugar, net], + std/[sequtils, times, sugar, net], stew/shims/net as stewNet, testutils/unittests, chronos, @@ -27,7 +27,6 @@ import waku_relay/protocol, waku_filter_v2/common, waku_store/common, - waku_lightpush/common, waku_peer_exchange, waku_metadata, ], diff --git a/tests/test_peer_store_extended.nim b/tests/test_peer_store_extended.nim index aa5947181..16926c7c2 100644 --- a/tests/test_peer_store_extended.nim +++ b/tests/test_peer_store_extended.nim @@ -9,12 +9,8 @@ import 
libp2p/multiaddress, testutils/unittests import - waku/[ - node/peer_manager/peer_manager, - node/peer_manager/waku_peer_store, - waku_node, - waku_core/peers, - ], + waku/ + [node/peer_manager/peer_manager, node/peer_manager/waku_peer_store, waku_core/peers], ./testlib/wakucore suite "Extended nim-libp2p Peer Store": diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim index 0be3c9193..e950cb015 100644 --- a/tests/test_relay_peer_exchange.nim +++ b/tests/test_relay_peer_exchange.nim @@ -4,10 +4,8 @@ import std/[sequtils, options], stew/shims/net, testutils/unittests, - chronicles, chronos, libp2p/peerid, - libp2p/crypto/crypto, libp2p/protocols/pubsub/gossipsub import waku/waku_core, waku/waku_node, ./testlib/wakucore, ./testlib/wakunode diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim index cf0fd4007..fe29627d4 100644 --- a/tests/test_waku_dnsdisc.nim +++ b/tests/test_waku_dnsdisc.nim @@ -3,7 +3,8 @@ import std/[sequtils, tables], stew/shims/net, - stew/[base32, results], + results, + stew/base32, testutils/unittests, chronicles, chronos, diff --git a/tests/test_waku_enr.nim b/tests/test_waku_enr.nim index b6571b09f..2ffff5e57 100644 --- a/tests/test_waku_enr.nim +++ b/tests/test_waku_enr.nim @@ -1,6 +1,6 @@ {.used.} -import std/[options, sequtils], stew/results, testutils/unittests +import std/[options, sequtils], results, testutils/unittests import waku/waku_core, waku/waku_enr, ./testlib/wakucore suite "Waku ENR - Capabilities bitfield": diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim index aebee13dc..c961773e5 100644 --- a/tests/test_waku_keepalive.nim +++ b/tests/test_waku_keepalive.nim @@ -1,7 +1,6 @@ {.used.} import - std/options, stew/shims/net as stewNet, testutils/unittests, chronos, diff --git a/tests/test_waku_noise_sessions.nim b/tests/test_waku_noise_sessions.nim index a02407e63..543653982 100644 --- a/tests/test_waku_noise_sessions.nim +++ b/tests/test_waku_noise_sessions.nim @@ -1,6 +1,6 @@ {.used.} -import std/tables, stew/[results, byteutils], testutils/unittests +import std/tables, results, stew/byteutils, testutils/unittests import waku/[ common/protobuf, diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim index 2213b7f8e..df4b442d6 100644 --- a/tests/test_wakunode.nim +++ b/tests/test_wakunode.nim @@ -17,9 +17,7 @@ import libp2p/nameresolving/mockresolver, eth/p2p/discoveryv5/enr import - waku/[waku_core, waku_node, node/peer_manager, waku_relay, waku_peer_exchange], - ./testlib/wakucore, - ./testlib/wakunode + waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode suite "WakuNode": asyncTest "Protocol matcher works as expected": diff --git a/tests/testlib/simple_mock.nim b/tests/testlib/simple_mock.nim index 234647d33..91ec19261 100644 --- a/tests/testlib/simple_mock.nim +++ b/tests/testlib/simple_mock.nim @@ -9,9 +9,9 @@ type Instr {.union.} = object proc mockImpl*(target, replacement: pointer) = # YOLO who needs alignment #doAssert (cast[ByteAddress](target) and ByteAddress(0x07)) == 0 - var page = cast[pointer](cast[ByteAddress](target) and (not 0xfff)) + var page = cast[pointer](cast[uint](target) and (not 0xfff)) doAssert mprotect(page, 4096, PROT_WRITE or PROT_EXEC) == 0 - let rel = cast[ByteAddress](replacement) - cast[ByteAddress](target) - 5 + let rel = cast[uint](replacement) - cast[uint](target) - 5 var instr = Instr( bytes: [ 0xe9.byte, diff --git a/tests/testlib/wakucore.nim b/tests/testlib/wakucore.nim index d18a87e7d..c68a69deb 
100644 --- a/tests/testlib/wakucore.nim +++ b/tests/testlib/wakucore.nim @@ -1,6 +1,7 @@ import std/[options, times], - stew/[results, byteutils], + results, + stew/byteutils, stew/shims/net, chronos, libp2p/switch, diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim index 1c9b8ec83..d1df39b6b 100644 --- a/tests/testlib/wakunode.nim +++ b/tests/testlib/wakunode.nim @@ -1,6 +1,6 @@ import std/options, - stew/results, + results, stew/shims/net, chronos, libp2p/switch, diff --git a/tests/waku_archive/archive_utils.nim b/tests/waku_archive/archive_utils.nim index 48e23f41d..498855075 100644 --- a/tests/waku_archive/archive_utils.nim +++ b/tests/waku_archive/archive_utils.nim @@ -1,13 +1,12 @@ {.used.} -import std/options, stew/results, chronos, libp2p/crypto/crypto +import std/options, results, chronos, libp2p/crypto/crypto import waku/[ node/peer_manager, waku_core, waku_archive, - waku_archive/common, waku_archive/driver/sqlite_driver, waku_archive/driver/sqlite_driver/migrations, common/databases/db_sqlite, diff --git a/tests/waku_archive/test_driver_queue.nim b/tests/waku_archive/test_driver_queue.nim index 16c0163c7..584ea9d7e 100644 --- a/tests/waku_archive/test_driver_queue.nim +++ b/tests/waku_archive/test_driver_queue.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, stew/results, testutils/unittests +import std/options, results, testutils/unittests import waku/[ waku_archive, diff --git a/tests/waku_archive/test_driver_queue_index.nim b/tests/waku_archive/test_driver_queue_index.nim index c383a676c..f34e181af 100644 --- a/tests/waku_archive/test_driver_queue_index.nim +++ b/tests/waku_archive/test_driver_queue_index.nim @@ -1,6 +1,6 @@ {.used.} -import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto +import std/random, testutils/unittests import waku/waku_core, waku/waku_archive/driver/queue_driver/index var rng = initRand() diff --git a/tests/waku_archive/test_driver_queue_pagination.nim b/tests/waku_archive/test_driver_queue_pagination.nim index dec3ccdee..45543c570 100644 --- a/tests/waku_archive/test_driver_queue_pagination.nim +++ b/tests/waku_archive/test_driver_queue_pagination.nim @@ -9,7 +9,6 @@ import waku_archive/driver/queue_driver/index, waku_core, ], - ../testlib/common, ../testlib/wakucore proc getTestQueueDriver(numMessages: int): QueueDriver = diff --git a/tests/waku_archive/test_driver_sqlite.nim b/tests/waku_archive/test_driver_sqlite.nim index 3ceae595d..5809a8492 100644 --- a/tests/waku_archive/test_driver_sqlite.nim +++ b/tests/waku_archive/test_driver_sqlite.nim @@ -2,12 +2,7 @@ import std/sequtils, testutils/unittests, chronos import - waku/[ - common/databases/db_sqlite, - waku_archive, - waku_archive/driver/sqlite_driver, - waku_core, - ], + waku/[waku_archive, waku_archive/driver/sqlite_driver, waku_core], ../waku_archive/archive_utils, ../testlib/wakucore diff --git a/tests/waku_archive/test_driver_sqlite_query.nim b/tests/waku_archive/test_driver_sqlite_query.nim index fc00a3be8..327ae17bb 100644 --- a/tests/waku_archive/test_driver_sqlite_query.nim +++ b/tests/waku_archive/test_driver_sqlite_query.nim @@ -4,13 +4,7 @@ import std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles import - waku/[ - common/databases/db_sqlite, - waku_archive, - waku_archive/driver/sqlite_driver, - waku_core, - waku_core/message/digest, - ], + waku/[waku_archive, waku_core, waku_core/message/digest], ../testlib/common, ../testlib/wakucore, ../waku_archive/archive_utils diff --git 
a/tests/waku_archive/test_retention_policy.nim b/tests/waku_archive/test_retention_policy.nim index 4686dda7e..ea86e1d69 100644 --- a/tests/waku_archive/test_retention_policy.nim +++ b/tests/waku_archive/test_retention_policy.nim @@ -1,13 +1,11 @@ {.used.} -import std/[sequtils, times], stew/results, testutils/unittests, chronos +import std/[sequtils, times], results, testutils/unittests, chronos import waku/[ - common/databases/db_sqlite, waku_core, waku_core/message/digest, waku_archive, - waku_archive/driver/sqlite_driver, waku_archive/retention_policy, waku_archive/retention_policy/retention_policy_capacity, waku_archive/retention_policy/retention_policy_size, diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim index 9211b15e9..802473d64 100644 --- a/tests/waku_archive/test_waku_archive.nim +++ b/tests/waku_archive/test_waku_archive.nim @@ -4,12 +4,10 @@ import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/cryp import waku/[ - common/databases/db_sqlite, common/databases/db_postgres/dbconn, common/paging, waku_core, waku_core/message/digest, - waku_archive/driver/sqlite_driver, waku_archive, ], ../waku_archive/archive_utils, diff --git a/tests/waku_archive_legacy/test_driver_queue.nim b/tests/waku_archive_legacy/test_driver_queue.nim index c69e5aa6a..aec9ad65d 100644 --- a/tests/waku_archive_legacy/test_driver_queue.nim +++ b/tests/waku_archive_legacy/test_driver_queue.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, stew/results, testutils/unittests +import std/options, results, testutils/unittests import waku/waku_archive_legacy, waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.}, diff --git a/tests/waku_archive_legacy/test_driver_sqlite.nim b/tests/waku_archive_legacy/test_driver_sqlite.nim index af043116f..9d8c4d14b 100644 --- a/tests/waku_archive_legacy/test_driver_sqlite.nim +++ b/tests/waku_archive_legacy/test_driver_sqlite.nim @@ -2,12 +2,10 @@ import std/sequtils, testutils/unittests, chronos import - waku/common/databases/db_sqlite, waku/waku_archive_legacy, waku/waku_archive_legacy/driver/sqlite_driver, waku/waku_core, ../waku_archive_legacy/archive_utils, - ../testlib/common, ../testlib/wakucore suite "SQLite driver": diff --git a/tests/waku_archive_legacy/test_driver_sqlite_query.nim b/tests/waku_archive_legacy/test_driver_sqlite_query.nim index ecf88e7c0..42f394891 100644 --- a/tests/waku_archive_legacy/test_driver_sqlite_query.nim +++ b/tests/waku_archive_legacy/test_driver_sqlite_query.nim @@ -4,9 +4,7 @@ import std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles import - waku/common/databases/db_sqlite, waku/waku_archive_legacy, - waku/waku_archive_legacy/driver/sqlite_driver, waku/waku_core, waku/waku_core/message/digest, ../testlib/common, diff --git a/tests/waku_archive_legacy/test_waku_archive.nim b/tests/waku_archive_legacy/test_waku_archive.nim index 181560a28..e58b2cfc9 100644 --- a/tests/waku_archive_legacy/test_waku_archive.nim +++ b/tests/waku_archive_legacy/test_waku_archive.nim @@ -1,21 +1,13 @@ {.used.} -import - std/[options, sequtils], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto import - waku/common/databases/db_sqlite, waku/common/paging, waku/waku_core, waku/waku_core/message/digest, - waku/waku_archive_legacy/driver/sqlite_driver, waku/waku_archive_legacy, ../waku_archive_legacy/archive_utils, - ../testlib/common, 
../testlib/wakucore suite "Waku Archive - message handling": diff --git a/tests/waku_core/test_peers.nim b/tests/waku_core/test_peers.nim index 3dc68fa1a..59ae2e2f3 100644 --- a/tests/waku_core/test_peers.nim +++ b/tests/waku_core/test_peers.nim @@ -1,7 +1,7 @@ {.used.} import - stew/results, + results, testutils/unittests, libp2p/multiaddress, libp2p/peerid, diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index edde80ab3..b2d59813a 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -2,7 +2,7 @@ import std/[sequtils, algorithm], - stew/results, + results, stew/shims/net, chronos, chronicles, diff --git a/tests/waku_discv5/utils.nim b/tests/waku_discv5/utils.nim index 679d206ea..422e13fd9 100644 --- a/tests/waku_discv5/utils.nim +++ b/tests/waku_discv5/utils.nim @@ -1,6 +1,5 @@ import std/options, - stew/results, stew/shims/net, chronos, libp2p/crypto/crypto as libp2p_keys, diff --git a/tests/waku_enr/utils.nim b/tests/waku_enr/utils.nim index 6dd017add..8f79b1d8f 100644 --- a/tests/waku_enr/utils.nim +++ b/tests/waku_enr/utils.nim @@ -1,15 +1,13 @@ import std/options, sequtils, - stew/results, + results, stew/shims/net, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys -import - waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/sharding], - ../testlib/[common, wakucore] +import waku/[waku_enr, discovery/waku_discv5, waku_enr/sharding], ../testlib/wakucore proc newTestEnrRecord*( privKey: libp2p_keys.PrivateKey, diff --git a/tests/waku_filter_v2/test_waku_client.nim b/tests/waku_filter_v2/test_waku_client.nim index dbfcd1c51..2c3e2f4ec 100644 --- a/tests/waku_filter_v2/test_waku_client.nim +++ b/tests/waku_filter_v2/test_waku_client.nim @@ -1,13 +1,6 @@ {.used.} -import - std/[options, tables, sequtils, strutils, json], - testutils/unittests, - stew/[results, byteutils], - chronos, - chronicles, - os, - libp2p/peerstore +import std/[options, sequtils, json], testutils/unittests, results, chronos import waku/node/[peer_manager, waku_node], diff --git a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim index c751114c1..7c8c640ba 100644 --- a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim +++ b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim @@ -1,25 +1,18 @@ {.used.} import - std/[options, tables, sequtils, strutils, json], + std/[options, tables, json], testutils/unittests, - stew/[results, byteutils], + results, chronos, chronicles, - os, libp2p/peerstore import - waku/[ - node/peer_manager, - waku_core, - common/rate_limit/setting, - common/rate_limit/token_bucket, - ], - waku/waku_filter_v2/[common, client, subscriptions, protocol, rpc_codec], - ../testlib/[wakucore, testasync, testutils, futures, sequtils], - ./waku_filter_utils, - ../resources/payloads + waku/[node/peer_manager, waku_core], + waku/waku_filter_v2/[common, client, subscriptions, protocol], + ../testlib/[wakucore, testasync, futures], + ./waku_filter_utils type AFilterClient = ref object of RootObj clientSwitch*: Switch diff --git a/tests/waku_filter_v2/waku_filter_utils.nim b/tests/waku_filter_v2/waku_filter_utils.nim index 5698949c5..2f04ceb36 100644 --- a/tests/waku_filter_v2/waku_filter_utils.nim +++ b/tests/waku_filter_v2/waku_filter_utils.nim @@ -1,4 +1,4 @@ -import std/[options, tables, sets, sequtils, algorithm], chronos, chronicles, os +import std/[options, tables, sets, algorithm], chronos, chronicles, os 
import waku/[ diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim index f3e94cb47..b334f043a 100644 --- a/tests/waku_lightpush/lightpush_utils.nim +++ b/tests/waku_lightpush/lightpush_utils.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, chronicles, chronos, libp2p/crypto/crypto +import std/options, chronos, libp2p/crypto/crypto import waku/node/peer_manager, diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim index 060a8c22b..d1a7ba57e 100644 --- a/tests/waku_lightpush/test_client.nim +++ b/tests/waku_lightpush/test_client.nim @@ -1,11 +1,6 @@ {.used.} -import - std/[options, strscans], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/[options, strscans], testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -15,7 +10,7 @@ import waku_lightpush/client, waku_lightpush/protocol_metrics, ], - ../testlib/[assertions, wakucore, testasync, futures, testutils], + ../testlib/[assertions, wakucore, testasync, futures], ./lightpush_utils, ../resources/[pubsub_topics, content_topics, payloads] diff --git a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim index 7148be37a..0dd7913d1 100644 --- a/tests/waku_lightpush/test_ratelimit.nim +++ b/tests/waku_lightpush/test_ratelimit.nim @@ -1,24 +1,11 @@ {.used.} -import - std/[options, strscans], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import - waku/[ - node/peer_manager, - common/rate_limit/setting, - waku_core, - waku_lightpush, - waku_lightpush/client, - waku_lightpush/protocol_metrics, - ], - ../testlib/[assertions, wakucore, testasync, futures, testutils], - ./lightpush_utils, - ../resources/[pubsub_topics, content_topics, payloads] + waku/[node/peer_manager, waku_core, waku_lightpush, waku_lightpush/client], + ../testlib/wakucore, + ./lightpush_utils suite "Rate limited push service": asyncTest "push message with rate limit not violated": diff --git a/tests/waku_lightpush_legacy/lightpush_utils.nim b/tests/waku_lightpush_legacy/lightpush_utils.nim index 733fbc8b1..11c4bf929 100644 --- a/tests/waku_lightpush_legacy/lightpush_utils.nim +++ b/tests/waku_lightpush_legacy/lightpush_utils.nim @@ -1,10 +1,9 @@ {.used.} -import std/options, chronicles, chronos, libp2p/crypto/crypto +import std/options, chronos, libp2p/crypto/crypto import waku/node/peer_manager, - waku/waku_core, waku/waku_lightpush_legacy, waku/waku_lightpush_legacy/[client, common], waku/common/rate_limit/setting, diff --git a/tests/waku_lightpush_legacy/test_client.nim b/tests/waku_lightpush_legacy/test_client.nim index b71b7d5c3..1dcb466c9 100644 --- a/tests/waku_lightpush_legacy/test_client.nim +++ b/tests/waku_lightpush_legacy/test_client.nim @@ -1,11 +1,6 @@ {.used.} -import - std/[options, strscans], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/[options, strscans], testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -16,9 +11,8 @@ import waku_lightpush_legacy/common, waku_lightpush_legacy/protocol_metrics, waku_lightpush_legacy/rpc, - waku_lightpush_legacy/rpc_codec, ], - ../testlib/[assertions, wakucore, testasync, futures, testutils], + ../testlib/[assertions, wakucore, testasync, futures], ./lightpush_utils, ../resources/[pubsub_topics, content_topics, payloads] diff --git a/tests/waku_lightpush_legacy/test_ratelimit.nim 
b/tests/waku_lightpush_legacy/test_ratelimit.nim index 1d033302f..3df8d369d 100644 --- a/tests/waku_lightpush_legacy/test_ratelimit.nim +++ b/tests/waku_lightpush_legacy/test_ratelimit.nim @@ -1,27 +1,17 @@ {.used.} -import - std/[options, strscans], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ node/peer_manager, - common/rate_limit/setting, waku_core, waku_lightpush_legacy, waku_lightpush_legacy/client, waku_lightpush_legacy/common, - waku_lightpush_legacy/protocol_metrics, - waku_lightpush_legacy/rpc, - waku_lightpush_legacy/rpc_codec, ], - ../testlib/[assertions, wakucore, testasync, futures, testutils], - ./lightpush_utils, - ../resources/[pubsub_topics, content_topics, payloads] + ../testlib/wakucore, + ./lightpush_utils suite "Rate limited push service": asyncTest "push message with rate limit not violated": diff --git a/tests/waku_peer_exchange/test_protocol.nim b/tests/waku_peer_exchange/test_protocol.nim index 11a61c4dc..8f7f20574 100644 --- a/tests/waku_peer_exchange/test_protocol.nim +++ b/tests/waku_peer_exchange/test_protocol.nim @@ -1,11 +1,10 @@ {.used.} import - std/[options, sequtils, tables, net], + std/[options, sequtils, net], testutils/unittests, chronos, - chronicles, - libp2p/[switch, peerId, crypto/crypto, multistream, muxers/muxer], + libp2p/[switch, peerId, crypto/crypto], eth/[keys, p2p/discoveryv5/enr] import @@ -18,14 +17,11 @@ import waku_peer_exchange/rpc_codec, waku_peer_exchange/protocol, node/peer_manager, - waku_relay/protocol, - waku_relay, waku_core, - waku_core/message/codec, common/enr/builder, waku_enr/sharding, ], - ../testlib/[wakucore, wakunode, simple_mock, assertions], + ../testlib/[wakucore, wakunode, assertions], ./utils.nim suite "Waku Peer Exchange": diff --git a/tests/waku_relay/test_message_id.nim b/tests/waku_relay/test_message_id.nim index b46554d17..633303120 100644 --- a/tests/waku_relay/test_message_id.nim +++ b/tests/waku_relay/test_message_id.nim @@ -1,10 +1,11 @@ import unittest, - stew/[shims/net, results, byteutils], + results, + stew/[shims/net, byteutils], nimcrypto/sha2, libp2p/protocols/pubsub/rpc/messages -import waku/waku_relay/message_id, ../testlib/sequtils +import waku/waku_relay/message_id suite "Message ID Provider": test "Non-empty string": diff --git a/tests/waku_relay/test_protocol.nim b/tests/waku_relay/test_protocol.nim index 399b55ea8..d0e8a7ed6 100644 --- a/tests/waku_relay/test_protocol.nim +++ b/tests/waku_relay/test_protocol.nim @@ -1,13 +1,12 @@ {.used.} import - std/[options, sequtils, strutils, strformat], + std/[options, strformat], stew/shims/net as stewNet, testutils/unittests, - chronicles, chronos, libp2p/protocols/pubsub/[pubsub, gossipsub], - libp2p/[multihash, stream/connection, switch], + libp2p/[stream/connection, switch], ./crypto_utils, std/json @@ -19,7 +18,7 @@ import waku_core, waku_core/message/codec, ], - ../testlib/[wakucore, testasync, testutils, futures, sequtils], + ../testlib/[wakucore, testasync, futures, sequtils], ./utils, ../resources/payloads diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim index c1a085b10..3e39294a1 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -4,7 +4,6 @@ import std/[strutils, sequtils, tempfiles], stew/byteutils, stew/shims/net as stewNet, - testutils/unittests, chronos, libp2p/switch, libp2p/protocols/pubsub/pubsub @@ -23,8 +22,7 @@ import ], ../waku_store/store_utils, ../waku_archive/archive_utils, 
- ../testlib/[wakucore, wakunode, testasync, futures], - ../resources/payloads + ../testlib/[wakucore, futures] proc noopRawHandler*(): WakuRelayHandler = var handler: WakuRelayHandler diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index 3d7be7220..b6fc44e27 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -3,8 +3,9 @@ {.push raises: [].} import - std/[options, os, osproc, sequtils, deques, streams, strutils, tempfiles, strformat], - stew/[results, byteutils], + std/[options, sequtils, deques], + results, + stew/byteutils, testutils/unittests, chronos, chronicles, @@ -16,19 +17,15 @@ import import waku/[ - waku_node, - node/waku_node, waku_rln_relay, waku_rln_relay/protocol_types, waku_rln_relay/constants, - waku_rln_relay/contract, waku_rln_relay/rln, waku_rln_relay/conversion_utils, waku_rln_relay/group_manager/on_chain/group_manager, ], - ../testlib/[wakucore, wakunode, common], - ./utils_onchain, - ./utils + ../testlib/wakucore, + ./utils_onchain suite "Onchain group manager": # We run Anvil diff --git a/tests/waku_rln_relay/test_rln_group_manager_static.nim b/tests/waku_rln_relay/test_rln_group_manager_static.nim index 56b5e8df1..5d1916f63 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_static.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_static.nim @@ -4,7 +4,7 @@ import testutils/unittests, - stew/results, + results, options, waku/[ waku_rln_relay/protocol_types, diff --git a/tests/waku_rln_relay/test_rln_serde.nim b/tests/waku_rln_relay/test_rln_serde.nim index 88badce97..1b1d8cd5f 100644 --- a/tests/waku_rln_relay/test_rln_serde.nim +++ b/tests/waku_rln_relay/test_rln_serde.nim @@ -2,7 +2,7 @@ {.push raises: [].} -import stew/results, stint +import results import ./rln/waku_rln_relay_utils, diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index 6768cd782..bc1c3f640 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -17,7 +17,6 @@ import waku_rln_relay/protocol_metrics, waku_keystore, ], - ../testlib/common, ./rln/waku_rln_relay_utils suite "Waku rln relay": diff --git a/tests/waku_rln_relay/utils.nim b/tests/waku_rln_relay/utils.nim index 7dfeffe65..a4247ab44 100644 --- a/tests/waku_rln_relay/utils.nim +++ b/tests/waku_rln_relay/utils.nim @@ -1,4 +1,4 @@ -import web3, chronos, options, stint, stew/byteutils +import web3, chronos, stew/byteutils proc deployContract*( web3: Web3, code: string, gasPrice = 0, contractInput = "" diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim index 788d6742e..82eaf085e 100644 --- a/tests/waku_rln_relay/utils_onchain.nim +++ b/tests/waku_rln_relay/utils_onchain.nim @@ -3,8 +3,9 @@ {.push raises: [].} import - std/[options, os, osproc, sequtils, deques, streams, strutils, tempfiles, strformat], - stew/[results, byteutils], + std/[options, os, osproc, deques, streams, strutils, tempfiles, strformat], + results, + stew/byteutils, testutils/unittests, chronos, chronicles, diff --git a/tests/waku_store/store_utils.nim b/tests/waku_store/store_utils.nim index f652f24b6..779074d7e 100644 --- a/tests/waku_store/store_utils.nim +++ b/tests/waku_store/store_utils.nim @@ -1,10 +1,9 @@ {.used.} -import std/options, chronos, chronicles, libp2p/crypto/crypto +import std/options, chronos import - waku/[node/peer_manager, 
waku_core, waku_store, waku_store/client], - ../testlib/[common, wakucore] + waku/[node/peer_manager, waku_store, waku_store/client], ../testlib/[common, wakucore] proc newTestWakuStore*( switch: Switch, handler: StoreQueryRequestHandler diff --git a/tests/waku_store/test_client.nim b/tests/waku_store/test_client.nim index 53e95b83e..38b07bdf4 100644 --- a/tests/waku_store/test_client.nim +++ b/tests/waku_store/test_client.nim @@ -1,10 +1,10 @@ {.used.} -import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import waku/[node/peer_manager, waku_core, waku_store, waku_store/client, common/paging], - ../testlib/[common, wakucore, testasync, futures], + ../testlib/[wakucore, testasync, futures], ./store_utils suite "Store Client": diff --git a/tests/waku_store/test_waku_store.nim b/tests/waku_store/test_waku_store.nim index b21c66be0..815b3ac7d 100644 --- a/tests/waku_store/test_waku_store.nim +++ b/tests/waku_store/test_waku_store.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -12,7 +12,7 @@ import waku_store/client, waku_store/common, ], - ../testlib/[common, wakucore], + ../testlib/wakucore, ./store_utils suite "Waku Store - query handler": diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index 1f48d18f2..1d5e4dcfd 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -18,10 +18,8 @@ import common/paging, waku_core, waku_core/message/digest, - waku_core/subscription, node/peer_manager, waku_archive, - waku_archive/driver/sqlite_driver, waku_filter_v2, waku_filter_v2/client, waku_store, diff --git a/tests/waku_store_legacy/store_utils.nim b/tests/waku_store_legacy/store_utils.nim index cd6236928..a70ca9376 100644 --- a/tests/waku_store_legacy/store_utils.nim +++ b/tests/waku_store_legacy/store_utils.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, chronos, chronicles, libp2p/crypto/crypto +import std/options, chronos import waku/[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client], diff --git a/tests/waku_store_legacy/test_client.nim b/tests/waku_store_legacy/test_client.nim index 9e403dc21..2a8616375 100644 --- a/tests/waku_store_legacy/test_client.nim +++ b/tests/waku_store_legacy/test_client.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -10,7 +10,7 @@ import waku_store_legacy/client, common/paging, ], - ../testlib/[common, wakucore, testasync, futures], + ../testlib/[wakucore, testasync, futures], ./store_utils suite "Store Client": diff --git a/tests/waku_store_legacy/test_rpc_codec.nim b/tests/waku_store_legacy/test_rpc_codec.nim index dae738d01..6897bab41 100644 --- a/tests/waku_store_legacy/test_rpc_codec.nim +++ b/tests/waku_store_legacy/test_rpc_codec.nim @@ -9,7 +9,6 @@ import waku_store_legacy/rpc, waku_store_legacy/rpc_codec, ], - ../testlib/common, ../testlib/wakucore procSuite "Waku Store - RPC codec": diff --git a/tests/waku_store_legacy/test_waku_store.nim b/tests/waku_store_legacy/test_waku_store.nim index e5e38b208..b8dc835c8 100644 --- a/tests/waku_store_legacy/test_waku_store.nim +++ b/tests/waku_store_legacy/test_waku_store.nim @@ -1,6 +1,6 @@ 
{.used.} -import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto +import testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -10,7 +10,7 @@ import waku_store_legacy, waku_store_legacy/client, ], - ../testlib/[common, wakucore], + ../testlib/wakucore, ./store_utils suite "Waku Store - query handler legacy": diff --git a/tests/waku_store_legacy/test_wakunode_store.nim b/tests/waku_store_legacy/test_wakunode_store.nim index 496ab753e..549033e98 100644 --- a/tests/waku_store_legacy/test_wakunode_store.nim +++ b/tests/waku_store_legacy/test_wakunode_store.nim @@ -3,7 +3,6 @@ import std/net, testutils/unittests, - chronicles, chronos, libp2p/crypto/crypto, libp2p/peerid, diff --git a/tests/waku_store_sync/sync_utils.nim b/tests/waku_store_sync/sync_utils.nim index 20a6bdfb1..a81ad6e2f 100644 --- a/tests/waku_store_sync/sync_utils.nim +++ b/tests/waku_store_sync/sync_utils.nim @@ -1,4 +1,4 @@ -import std/[options, random], chronos, chronicles +import std/[options, random], chronos import waku/[ diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim index f507ad95b..df14de6a1 100644 --- a/tests/waku_store_sync/test_protocol.nim +++ b/tests/waku_store_sync/test_protocol.nim @@ -1,11 +1,7 @@ {.used.} import - std/[options, sets, random, math], - testutils/unittests, - chronos, - libp2p/crypto/crypto, - stew/byteutils + std/[options, sets, random, math], testutils/unittests, chronos, libp2p/crypto/crypto import ../../waku/[ diff --git a/tests/waku_store_sync/test_storage.nim b/tests/waku_store_sync/test_storage.nim index 034eb260e..9e9a80b29 100644 --- a/tests/waku_store_sync/test_storage.nim +++ b/tests/waku_store_sync/test_storage.nim @@ -4,7 +4,6 @@ import std/[options, random], testutils/unittests, chronos import ../../waku/waku_core, - ../../waku/waku_core/message/digest, ../../waku/waku_store_sync/common, ../../waku/waku_store_sync/storage/seq_storage, ./sync_utils diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index fdb0cbc41..99ddacd8c 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -1,7 +1,7 @@ {.used.} import - std/[sequtils, strformat, net], + std/[sequtils, net], stew/shims/net, testutils/unittests, presto, diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim index 49b05df16..fc32440d7 100644 --- a/tests/wakunode_rest/test_rest_cors.nim +++ b/tests/wakunode_rest/test_rest_cors.nim @@ -13,10 +13,7 @@ import waku_node, node/waku_node as waku_node2, waku_api/rest/server, - waku_api/rest/client, - waku_api/rest/responses, waku_api/rest/debug/handlers as debug_api, - waku_api/rest/debug/client as debug_api_client, ], ../testlib/common, ../testlib/wakucore, diff --git a/tests/wakunode_rest/test_rest_debug_serdes.nim b/tests/wakunode_rest/test_rest_debug_serdes.nim index bf007b8d2..13b791dc9 100644 --- a/tests/wakunode_rest/test_rest_debug_serdes.nim +++ b/tests/wakunode_rest/test_rest_debug_serdes.nim @@ -1,6 +1,6 @@ {.used.} -import stew/results, stew/byteutils, testutils/unittests, json_serialization +import results, stew/byteutils, testutils/unittests, json_serialization import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types suite "Waku v2 REST API - Debug - serialization": diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim index 60252f92a..358872769 100644 --- a/tests/wakunode_rest/test_rest_filter.nim +++ 
b/tests/wakunode_rest/test_rest_filter.nim @@ -1,7 +1,6 @@ {.used.} import - std/os, chronos/timer, stew/byteutils, stew/shims/net, diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index f3b27906e..7d842a3eb 100644 --- a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -23,7 +23,6 @@ import node/health_monitor, ], ../testlib/common, - ../testlib/testutils, ../testlib/wakucore, ../testlib/wakunode diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim index 3490a5f80..61d1de88d 100644 --- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -26,8 +26,7 @@ import common/rate_limit/setting, ], ../testlib/wakucore, - ../testlib/wakunode, - ../testlib/testutils + ../testlib/wakunode proc testWakuNode(): WakuNode = let diff --git a/tests/wakunode_rest/test_rest_relay_serdes.nim b/tests/wakunode_rest/test_rest_relay_serdes.nim index 8cc5835f0..086aba22b 100644 --- a/tests/wakunode_rest/test_rest_relay_serdes.nim +++ b/tests/wakunode_rest/test_rest_relay_serdes.nim @@ -1,6 +1,6 @@ {.used.} -import stew/[results, byteutils], chronicles, unittest2, json_serialization +import results, stew/byteutils, unittest2, json_serialization import waku/[common/base64, waku_api/rest/serdes, waku_api/rest/relay/types, waku_core] suite "Waku v2 Rest API - Relay - serialization": diff --git a/tests/wakunode_rest/test_rest_serdes.nim b/tests/wakunode_rest/test_rest_serdes.nim index c50bba983..719742bf8 100644 --- a/tests/wakunode_rest/test_rest_serdes.nim +++ b/tests/wakunode_rest/test_rest_serdes.nim @@ -1,6 +1,6 @@ {.used.} -import stew/[results, byteutils], chronicles, unittest2, json_serialization +import results, stew/byteutils, chronicles, unittest2, json_serialization import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types # TODO: Decouple this test suite from the `debug_api` module by defining diff --git a/waku.nimble b/waku.nimble index 9c0e819fb..9cf73295f 100644 --- a/waku.nimble +++ b/waku.nimble @@ -23,6 +23,7 @@ requires "nim >= 2.0.8", "web3", "presto", "regex", + "results", "db_connector", "minilru", "quic" diff --git a/waku/common/databases/db_postgres/dbconn.nim b/waku/common/databases/db_postgres/dbconn.nim index 0edb74ede..317cc3003 100644 --- a/waku/common/databases/db_postgres/dbconn.nim +++ b/waku/common/databases/db_postgres/dbconn.nim @@ -1,6 +1,5 @@ import - std/[times, strutils, asyncnet, os, sequtils, sets, strformat], - regex, + std/[times, strutils, os, sets, strformat], results, chronos, chronos/threadsync, diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim index e896ecdbb..caa84db63 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -8,7 +8,6 @@ import libp2p/builders, libp2p/nameresolving/nameresolver, libp2p/transports/wstransport, - libp2p/protocols/connectivity/relay/client, libp2p/protocols/connectivity/relay/relay import ../waku_enr, diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 91f3cee2e..9760d1580 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -38,7 +38,6 @@ import ../waku_rln_relay, ../waku_store, ../waku_filter_v2, - ../factory/networks_config, ../factory/node_factory, ../factory/internal_config, ../factory/external_config, diff --git a/waku/incentivization/common.nim b/waku/incentivization/common.nim index 79fcf1645..071b4c18f 100644 --- a/waku/incentivization/common.nim +++ 
b/waku/incentivization/common.nim @@ -1,6 +1,6 @@ import std/options -import waku/incentivization/[rpc, eligibility_manager] +import waku/incentivization/rpc proc init*(T: type EligibilityStatus, isEligible: bool): T = if isEligible: diff --git a/waku/incentivization/eligibility_manager.nim b/waku/incentivization/eligibility_manager.nim index da8280da3..b10b293e1 100644 --- a/waku/incentivization/eligibility_manager.nim +++ b/waku/incentivization/eligibility_manager.nim @@ -1,6 +1,6 @@ import std/[options, sets], chronos, web3, stew/byteutils, stint, results, chronicles -import waku/incentivization/rpc, tests/waku_rln_relay/[utils_onchain, utils] +import waku/incentivization/rpc, tests/waku_rln_relay/utils_onchain const SimpleTransferGasUsed = Quantity(21000) const TxReceiptQueryTimeout = 3.seconds diff --git a/waku/incentivization/rpc_codec.nim b/waku/incentivization/rpc_codec.nim index 5d3ce48d5..9529ddffe 100644 --- a/waku/incentivization/rpc_codec.nim +++ b/waku/incentivization/rpc_codec.nim @@ -1,5 +1,5 @@ import std/options -import ../common/protobuf, ../waku_core, ./rpc +import ../common/protobuf, ./rpc # Codec for EligibilityProof diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index ae08b503a..18986d5c0 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -15,8 +15,6 @@ import libp2p/protocols/ping, libp2p/protocols/pubsub/gossipsub, libp2p/protocols/pubsub/rpc/messages, - libp2p/protocols/connectivity/autonat/client, - libp2p/protocols/connectivity/autonat/service, libp2p/builders, libp2p/transports/transport, libp2p/transports/tcptransport, @@ -50,8 +48,7 @@ import ../waku_rln_relay, ./config, ./peer_manager, - ../common/rate_limit/setting, - ../discovery/autonat_service + ../common/rate_limit/setting declarePublicCounter waku_node_messages, "number of messages received", ["type"] declarePublicHistogram waku_histogram_message_size, diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim index 1fd9fdfc8..ebcebe965 100644 --- a/waku/waku_api/rest/admin/client.nim +++ b/waku/waku_api/rest/admin/client.nim @@ -1,13 +1,8 @@ {.push raises: [].} -import - chronicles, - json_serialization, - json_serialization/std/options, - presto/[route, client], - stew/byteutils +import chronicles, json_serialization, presto/[route, client], stew/byteutils -import ../serdes, ../responses, ../rest_serdes, ./types +import ../serdes, ../rest_serdes, ./types export types diff --git a/waku/waku_api/rest/debug/client.nim b/waku/waku_api/rest/debug/client.nim index 7048ae98f..c2d588197 100644 --- a/waku/waku_api/rest/debug/client.nim +++ b/waku/waku_api/rest/debug/client.nim @@ -2,7 +2,7 @@ import chronicles, json_serialization, json_serialization/std/options, presto/[route, client] -import ../serdes, ../responses, ../rest_serdes, ./types +import ../serdes, ../rest_serdes, ./types export types diff --git a/waku/waku_api/rest/filter/client.nim b/waku/waku_api/rest/filter/client.nim index b674bc594..db1a6895e 100644 --- a/waku/waku_api/rest/filter/client.nim +++ b/waku/waku_api/rest/filter/client.nim @@ -2,20 +2,12 @@ import json, - std/sets, stew/byteutils, - strformat, chronicles, json_serialization, json_serialization/std/options, presto/[route, client, common] -import - ../../../common/base64, - ../../../waku_core, - ../serdes, - ../responses, - ../rest_serdes, - ./types +import ../../../common/base64, ../serdes, ../rest_serdes, ./types export types diff --git a/waku/waku_api/rest/health/client.nim b/waku/waku_api/rest/health/client.nim index 
c6f339006..97f4a2c6d 100644 --- a/waku/waku_api/rest/health/client.nim +++ b/waku/waku_api/rest/health/client.nim @@ -1,8 +1,7 @@ {.push raises: [].} -import - chronicles, json_serialization, json_serialization/std/options, presto/[route, client] -import ./types, ../serdes, ../responses, ../rest_serdes, waku/node/health_monitor +import chronicles, json_serialization, presto/[route, client] +import ./types, ../serdes, ../rest_serdes, waku/node/health_monitor logScope: topics = "waku node rest health_api" diff --git a/waku/waku_api/rest/legacy_lightpush/client.nim b/waku/waku_api/rest/legacy_lightpush/client.nim index f0932e99f..a1e442857 100644 --- a/waku/waku_api/rest/legacy_lightpush/client.nim +++ b/waku/waku_api/rest/legacy_lightpush/client.nim @@ -1,15 +1,7 @@ {.push raises: [].} -import - json, - std/sets, - stew/byteutils, - strformat, - chronicles, - json_serialization, - json_serialization/std/options, - presto/[route, client, common] -import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types +import chronicles, json_serialization, presto/[route, client, common] +import ../serdes, ../rest_serdes, ./types export types diff --git a/waku/waku_api/rest/relay/client.nim b/waku/waku_api/rest/relay/client.nim index 5e72bb609..6956a956d 100644 --- a/waku/waku_api/rest/relay/client.nim +++ b/waku/waku_api/rest/relay/client.nim @@ -1,13 +1,7 @@ {.push raises: [].} -import - std/sets, - stew/byteutils, - chronicles, - json_serialization, - json_serialization/std/options, - presto/[route, client, common] -import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types +import stew/byteutils, chronicles, json_serialization, presto/[route, client, common] +import ../../../waku_core, ../serdes, ../rest_serdes, ./types export types diff --git a/waku/waku_archive/driver/queue_driver/index.nim b/waku/waku_archive/driver/queue_driver/index.nim index 113d426d4..17783ebcc 100644 --- a/waku/waku_archive/driver/queue_driver/index.nim +++ b/waku/waku_archive/driver/queue_driver/index.nim @@ -1,6 +1,5 @@ {.push raises: [].} -import stew/byteutils import ../../../waku_core type Index* = object diff --git a/waku/waku_filter_v2/subscriptions.nim b/waku/waku_filter_v2/subscriptions.nim index 6b22a94b9..8d3b8084f 100644 --- a/waku/waku_filter_v2/subscriptions.nim +++ b/waku/waku_filter_v2/subscriptions.nim @@ -7,12 +7,7 @@ import libp2p/peerid, libp2p/stream/connection, stew/shims/sets -import - ../waku_core, - ../utils/tableutils, - ../common/rate_limit/setting, - ../node/peer_manager, - ./common +import ../waku_core, ../utils/tableutils, ../node/peer_manager logScope: topics = "waku filter subscriptions" diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim index d6700412f..3cfc3fe90 100644 --- a/waku/waku_lightpush/callbacks.nim +++ b/waku/waku_lightpush/callbacks.nim @@ -1,12 +1,11 @@ {.push raises: [].} -import stew/results +import results import ../waku_core, ../waku_relay, ./common, - ./protocol_metrics, ../waku_rln_relay, ../waku_rln_relay/protocol_types diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index c9f39cca2..356ccf8f2 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -78,5 +78,3 @@ func mapPubishingErrorToPushResult*( some("Error generating message id, skipping publish"), ) ) - else: - return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, none[string]())) diff --git a/waku/waku_lightpush/self_req_handler.nim b/waku/waku_lightpush/self_req_handler.nim index 
fffced40a..06a0d3715 100644 --- a/waku/waku_lightpush/self_req_handler.nim +++ b/waku/waku_lightpush/self_req_handler.nim @@ -9,15 +9,8 @@ ## which spawn a full service Waku node ## that could be used also as a lightpush client, helping testing and development. -import results, chronos, chronicles, std/options, metrics, stew/byteutils -import - ../waku_core, - ./protocol, - ./common, - ./rpc, - ./rpc_codec, - ./protocol_metrics, - ../utils/requests +import results, chronos, std/options, metrics +import ../waku_core, ./protocol, ./common, ./rpc, ./rpc_codec, ../utils/requests proc handleSelfLightPushRequest*( self: WakuLightPush, pubSubTopic: Option[PubsubTopic], message: WakuMessage diff --git a/waku/waku_noise/noise_types.nim b/waku/waku_noise/noise_types.nim index 3d288a242..3b88c43e8 100644 --- a/waku/waku_noise/noise_types.nim +++ b/waku/waku_noise/noise_types.nim @@ -11,7 +11,7 @@ import std/[options, tables] import chronos import chronicles import bearssl -import nimcrypto/[sha2, hmac] +import nimcrypto/sha2 import libp2p/errors import libp2p/crypto/[crypto, chacha20poly1305, curve25519] diff --git a/waku/waku_noise/noise_utils.nim b/waku/waku_noise/noise_utils.nim index a612c0728..babab1ca4 100644 --- a/waku/waku_noise/noise_utils.nim +++ b/waku/waku_noise/noise_utils.nim @@ -9,8 +9,9 @@ import std/[algorithm, base64, oids, options, strutils, tables, sequtils] import chronos import chronicles import bearssl/rand -import stew/[results, endians2, byteutils] -import nimcrypto/[sha2, hmac] +import results +import stew/[endians2, byteutils] +import nimcrypto/sha2 import libp2p/crypto/[chacha20poly1305, curve25519, hkdf] From 7c59f7c25712c8f3568bafb5265124237a7c1312 Mon Sep 17 00:00:00 2001 From: Simon-Pierre Vivier Date: Wed, 16 Apr 2025 09:24:05 -0400 Subject: [PATCH 22/48] feat: enhance Waku Sync logs and metrics (#3370) --- waku/waku_store_sync/protocols_metrics.nim | 4 ++++ waku/waku_store_sync/reconciliation.nim | 26 ++++++++++++++++------ waku/waku_store_sync/transfer.nim | 12 ++++++---- 3 files changed, 31 insertions(+), 11 deletions(-) diff --git a/waku/waku_store_sync/protocols_metrics.nim b/waku/waku_store_sync/protocols_metrics.nim index 2d2776674..bb22f11c7 100644 --- a/waku/waku_store_sync/protocols_metrics.nim +++ b/waku/waku_store_sync/protocols_metrics.nim @@ -10,6 +10,10 @@ declarePublicHistogram reconciliation_roundtrips, "the nubmer of roundtrips for each reconciliation", buckets = [0.0, 1.0, 2.0, 3.0, 5.0, 10.0, Inf] +declarePublicHistogram reconciliation_differences, + "the nubmer of differences for each reconciliation", + buckets = [0.0, 10.0, 50.0, 100.0, 500.0, 1000.0, 10000.0, Inf] + declarePublicSummary total_bytes_exchanged, "the number of bytes sent and received by the protocols", ["protocol", "direction"] diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index 80c025140..f7c13d42c 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -96,19 +96,23 @@ proc messageIngress*(self: SyncReconciliation, id: SyncID) = proc processRequest( self: SyncReconciliation, conn: Connection ): Future[Result[void, string]] {.async.} = - var roundTrips = 0 + var + roundTrips = 0 + diffs = 0 while true: let readRes = catch: await conn.readLp(int.high) let buffer: seq[byte] = readRes.valueOr: - return err("connection read error: " & error.msg) + await conn.close() + return err("remote " & $conn.peerId & " connection read error: " & error.msg) total_bytes_exchanged.observe(buffer.len, 
labelValues = [Reconciliation, Receiving]) let recvPayload = RangesData.deltaDecode(buffer).valueOr: - return err("payload decoding error: " & error) + await conn.close() + return err("remote " & $conn.peerId & " payload decoding error: " & error) roundTrips.inc() @@ -136,9 +140,11 @@ proc processRequest( for hash in hashToSend: self.remoteNeedsTx.addLastNoWait((conn.peerId, hash)) + diffs.inc() for hash in hashToRecv: self.localWantsTx.addLastNoWait((conn.peerId, hash)) + diffs.inc() rawPayload = sendPayload.deltaEncode() @@ -150,7 +156,9 @@ proc processRequest( await conn.writeLP(rawPayload) if writeRes.isErr(): - return err("connection write error: " & writeRes.error.msg) + await conn.close() + return + err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg) trace "sync payload sent", local = self.peerManager.switch.peerInfo.peerId, @@ -163,6 +171,7 @@ proc processRequest( continue reconciliation_roundtrips.observe(roundTrips) + reconciliation_differences.observe(diffs) await conn.close() @@ -196,12 +205,15 @@ proc initiate( await connection.writeLP(sendPayload) if writeRes.isErr(): - return err("connection write error: " & writeRes.error.msg) + await connection.close() + return err( + "remote " & $connection.peerId & " connection write error: " & writeRes.error.msg + ) trace "sync payload sent", local = self.peerManager.switch.peerInfo.peerId, remote = connection.peerId, - payload = sendPayload + payload = initPayload ?await self.processRequest(connection) @@ -217,7 +229,7 @@ proc storeSynchronization*( let connOpt = await self.peerManager.dialPeer(peer, WakuReconciliationCodec) let conn: Connection = connOpt.valueOr: - return err("cannot establish sync connection") + return err("fail to dial remote " & $peer.peerId) debug "sync session initialized", local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim index 0ac959de0..81bed5ece 100644 --- a/waku/waku_store_sync/transfer.nim +++ b/waku/waku_store_sync/transfer.nim @@ -57,7 +57,8 @@ proc sendMessage( await conn.writeLP(rawPayload) if writeRes.isErr(): - return err("connection write error: " & writeRes.error.msg) + return + err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg) total_transfer_messages_exchanged.inc(labelValues = [Sending]) @@ -69,7 +70,7 @@ proc openConnection( let connOpt = await self.peerManager.dialPeer(peerId, WakuTransferCodec) let conn: Connection = connOpt.valueOr: - return err("Cannot establish transfer connection") + return err("fail to dial remote " & $peerId) debug "transfer session initialized", local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId @@ -126,6 +127,8 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} = WakuMessageAndTopic(pubsub: response.topics[0], message: response.messages[0]) (await sendMessage(connection, msg)).isOkOr: + self.outSessions.del(peerId) + await connection.close() error "failed to send message", error = error continue @@ -158,17 +161,16 @@ proc initProtocolHandler(self: SyncTransfer) = if value[].missingOrExcl(hash): error "unwanted hash received, disconnecting" self.inSessions.del(conn.peerId) - await conn.close() break do: error "unwanted hash received, disconnecting" self.inSessions.del(conn.peerId) - await conn.close() break #TODO verify msg RLN proof... 
(await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr: + error "failed to archive message", error = $error continue let id = SyncID(time: msg.timestamp, hash: hash) @@ -176,6 +178,8 @@ proc initProtocolHandler(self: SyncTransfer) = continue + await conn.close() + debug "transfer session ended", local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId From 2786ef607942421fc045de56ad9449032cfafbff Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Wed, 16 Apr 2025 17:04:52 +0200 Subject: [PATCH 23/48] chore: update lite-protocol-tester for handling shard argument. (#3371) * chore: replace pubsub topic with shard configuration across the lite protocol tester * chore: enhance protocol performance - response time - metrics * fix filter-client double mounting possibility. --- apps/liteprotocoltester/.env | 6 ++--- .../Dockerfile.liteprotocoltester.compile | 2 ++ apps/liteprotocoltester/README.md | 6 ++--- .../docker-compose-on-simularor.yml | 2 +- apps/liteprotocoltester/docker-compose.yml | 2 +- apps/liteprotocoltester/filter_subscriber.nim | 8 ++++--- apps/liteprotocoltester/infra.env | 2 +- .../lightpush_publisher.nim | 9 +++++++- .../liteprotocoltester/liteprotocoltester.nim | 5 ++--- apps/liteprotocoltester/lpt_metrics.nim | 7 ++++++ apps/liteprotocoltester/run_service_node.sh | 8 +++---- apps/liteprotocoltester/run_tester_node.sh | 16 +++++++++----- .../run_tester_node_at_infra.sh | 16 +++++++++----- .../run_tester_node_on_fleet.sh | 22 ++++++++++++------- .../service_peer_management.nim | 4 ++-- apps/liteprotocoltester/tester_config.nim | 17 +++++--------- waku/common/rate_limit/request_limiter.nim | 6 ++--- waku/common/rate_limit/service_metrics.nim | 10 ++++++++- .../rate_limit/single_token_limiter.nim | 6 ++--- waku/node/waku_node.nim | 12 +++++++--- waku/waku_filter_v2/protocol_metrics.nim | 6 ++++- waku/waku_lightpush/protocol.nim | 3 ++- waku/waku_lightpush_legacy/protocol.nim | 4 +++- 23 files changed, 115 insertions(+), 64 deletions(-) diff --git a/apps/liteprotocoltester/.env b/apps/liteprotocoltester/.env index 4f7c49976..0330284e1 100644 --- a/apps/liteprotocoltester/.env +++ b/apps/liteprotocoltester/.env @@ -12,16 +12,16 @@ MIN_MESSAGE_SIZE=15Kb MAX_MESSAGE_SIZE=145Kb ## for wakusim -#PUBSUB=/waku/2/rs/66/0 +#SHARD=0 #CONTENT_TOPIC=/tester/2/light-pubsub-test/wakusim #CLUSTER_ID=66 ## for status.prod -PUBSUB=/waku/2/rs/16/32 +#SHARDS=32 CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet CLUSTER_ID=16 ## for TWN -#PUBSUB=/waku/2/rs/1/4 +#SHARD=4 #CONTENT_TOPIC=/tester/2/light-pubsub-test/twn #CLUSTER_ID=1 diff --git a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile index cee1929ce..6d789ebd1 100644 --- a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile +++ b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile @@ -55,6 +55,8 @@ RUN chmod +x /usr/bin/liteprotocoltester FROM base_lpt AS standalone_lpt COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node.sh /usr/bin/ +COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/ + RUN chmod +x /usr/bin/run_tester_node.sh ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"] diff --git a/apps/liteprotocoltester/README.md b/apps/liteprotocoltester/README.md index eff025969..ea02ec1c1 100644 --- a/apps/liteprotocoltester/README.md +++ b/apps/liteprotocoltester/README.md @@ -127,7 +127,7 @@ Run a 
SENDER role liteprotocoltester and a RECEIVER role one on different termin | ---: | :--- | :--- | | NUM_MESSAGES | Number of message to publish, 0 means infinite | 120 | | MESSAGE_INTERVAL_MILLIS | Frequency of messages in milliseconds | 1000 | -| PUBSUB | Used pubsub_topic for testing | /waku/2/rs/66/0 | +| SHARD | Used shard for testing | 0 | | CONTENT_TOPIC | content_topic for testing | /tester/1/light-pubsub-example/proto | | CLUSTER_ID | cluster_id of the network | 16 | | START_PUBLISHING_AFTER_SECS | Delay in seconds before starting to publish to let service node connected | 5 | @@ -272,7 +272,7 @@ export NUM_MESSAGES=200 export MESSAGE_INTERVAL_MILLIS=1000 export MIN_MESSAGE_SIZE=15Kb export MAX_MESSAGE_SIZE=145Kb -export PUBSUB=/waku/2/rs/16/32 +export SHARD=32 export CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet export CLUSTER_ID=16 @@ -307,7 +307,7 @@ export NUM_MESSAGES=300 export MESSAGE_INTERVAL_MILLIS=7000 export MIN_MESSAGE_SIZE=15Kb export MAX_MESSAGE_SIZE=145Kb -export PUBSUB=/waku/2/rs/1/4 +export SHARD=4 export CONTENT_TOPIC=/tester/2/light-pubsub-test/twn export CLUSTER_ID=1 diff --git a/apps/liteprotocoltester/docker-compose-on-simularor.yml b/apps/liteprotocoltester/docker-compose-on-simularor.yml index c63a294f2..9e899f78f 100644 --- a/apps/liteprotocoltester/docker-compose-on-simularor.yml +++ b/apps/liteprotocoltester/docker-compose-on-simularor.yml @@ -16,7 +16,7 @@ x-rln-environment: &rln_env x-test-running-conditions: &test_running_conditions NUM_MESSAGES: ${NUM_MESSAGES:-120} MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}" - PUBSUB: ${PUBSUB:-/waku/2/rs/66/0} + SHARD: ${SHARD:-0} CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim} CLUSTER_ID: ${CLUSTER_ID:-66} MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb} diff --git a/apps/liteprotocoltester/docker-compose.yml b/apps/liteprotocoltester/docker-compose.yml index afd2f1e72..0effbf8f0 100644 --- a/apps/liteprotocoltester/docker-compose.yml +++ b/apps/liteprotocoltester/docker-compose.yml @@ -16,7 +16,7 @@ x-rln-environment: &rln_env x-test-running-conditions: &test_running_conditions NUM_MESSAGES: ${NUM_MESSAGES:-120} MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}" - PUBSUB: ${PUBSUB:-/waku/2/rs/66/0} + SHARD: ${SHARD:-0} CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim} CLUSTER_ID: ${CLUSTER_ID:-66} MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb} diff --git a/apps/liteprotocoltester/filter_subscriber.nim b/apps/liteprotocoltester/filter_subscriber.nim index 143e0ca80..fbb11c92e 100644 --- a/apps/liteprotocoltester/filter_subscriber.nim +++ b/apps/liteprotocoltester/filter_subscriber.nim @@ -130,7 +130,9 @@ proc setupAndSubscribe*( var stats: PerPeerStatistics actualFilterPeer = servicePeer - let pushHandler = proc(pubsubTopic: PubsubTopic, message: WakuMessage) {.async.} = + let pushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure.} = let payloadStr = string.fromBytes(message.payload) let testerMessage = js.Json.decode(payloadStr, ProtocolTesterMessage) let msgHash = computeMessageHash(pubsubTopic, message).to0xHex @@ -163,7 +165,7 @@ proc setupAndSubscribe*( if conf.numMessages > 0 and waitFor stats.checkIfAllMessagesReceived(maxWaitForLastMessage): - waitFor unsubscribe(wakuNode, conf.pubsubTopics[0], conf.contentTopics[0]) + waitFor unsubscribe(wakuNode, conf.getPubsubTopic(), conf.contentTopics[0]) info "All messages received. Exiting." 
## for gracefull shutdown through signal hooks @@ -176,5 +178,5 @@ proc setupAndSubscribe*( # Start maintaining subscription asyncSpawn maintainSubscription( - wakuNode, conf.pubsubTopics[0], conf.contentTopics[0], conf.fixedServicePeer + wakuNode, conf.getPubsubTopic(), conf.contentTopics[0], conf.fixedServicePeer ) diff --git a/apps/liteprotocoltester/infra.env b/apps/liteprotocoltester/infra.env index 6d4542eca..ebf614732 100644 --- a/apps/liteprotocoltester/infra.env +++ b/apps/liteprotocoltester/infra.env @@ -4,7 +4,7 @@ NUM_MESSAGES=300 MESSAGE_INTERVAL_MILLIS=1000 MIN_MESSAGE_SIZE=15Kb MAX_MESSAGE_SIZE=145Kb -PUBSUB=/waku/2/rs/16/32 +SHARD=32 CONTENT_TOPIC=/tester/2/light-pubsub-test-at-infra/status-prod CLUSTER_ID=16 LIGHTPUSH_BOOTSTRAP=enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0 diff --git a/apps/liteprotocoltester/lightpush_publisher.nim b/apps/liteprotocoltester/lightpush_publisher.nim index 32f802fe4..d79e68590 100644 --- a/apps/liteprotocoltester/lightpush_publisher.nim +++ b/apps/liteprotocoltester/lightpush_publisher.nim @@ -145,13 +145,20 @@ proc publishMessages( lightpushContentTopic, renderMsgSize, ) + + let publishStartTime = Moment.now() + let wlpRes = await wakuNode.legacyLightpushPublish( some(lightpushPubsubTopic), message, actualServicePeer ) + let publishDuration = Moment.now() - publishStartTime + let msgHash = computeMessageHash(lightpushPubsubTopic, message).to0xHex if wlpRes.isOk(): + lpt_publish_duration_seconds.observe(publishDuration.milliseconds.float / 1000) + sentMessages[messagesSent] = (hash: msgHash, relayed: true) notice "published message using lightpush", index = messagesSent + 1, @@ -251,7 +258,7 @@ proc setupAndPublish*( asyncSpawn publishMessages( wakuNode, servicePeer, - conf.pubsubTopics[0], + conf.getPubsubTopic(), conf.contentTopics[0], conf.numMessages, (min: parsedMinMsgSize, max: parsedMaxMsgSize), diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index c23b80e72..ef63e6e7d 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -99,7 +99,7 @@ when isMainModule: wakuConf.dnsAddrs = true wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")] - wakuConf.pubsubTopics = conf.pubsubTopics + wakuConf.shards = @[conf.shard] wakuConf.contentTopics = conf.contentTopics wakuConf.clusterId = conf.clusterId ## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc... @@ -118,6 +118,7 @@ when isMainModule: wakuConf.store = false wakuConf.rest = false + wakuConf.relayServiceRatio = "40:60" # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it # It will always be called from main thread anyway. 
@@ -202,10 +203,8 @@ when isMainModule: var codec = WakuLightPushCodec # mounting relevant client, for PX filter client must be mounted ahead if conf.testFunc == TesterFunctionality.SENDER: - wakuApp.node.mountLegacyLightPushClient() codec = WakuLightPushCodec else: - waitFor wakuApp.node.mountFilterClient() codec = WakuFilterSubscribeCodec var lookForServiceNode = false diff --git a/apps/liteprotocoltester/lpt_metrics.nim b/apps/liteprotocoltester/lpt_metrics.nim index e68164d13..8b30619de 100644 --- a/apps/liteprotocoltester/lpt_metrics.nim +++ b/apps/liteprotocoltester/lpt_metrics.nim @@ -47,3 +47,10 @@ declarePublicGauge lpt_px_peers, declarePublicGauge lpt_dialed_peers, "Number of peers successfully dialed", ["agent"] declarePublicGauge lpt_dial_failures, "Number of dial failures by cause", ["agent"] + +declarePublicHistogram lpt_publish_duration_seconds, + "duration to lightpush messages", + buckets = [ + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, + 15.0, 20.0, 30.0, Inf, + ] diff --git a/apps/liteprotocoltester/run_service_node.sh b/apps/liteprotocoltester/run_service_node.sh index 1d36292c1..07fdbe980 100755 --- a/apps/liteprotocoltester/run_service_node.sh +++ b/apps/liteprotocoltester/run_service_node.sh @@ -5,10 +5,10 @@ IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/') echo "Service node IP: ${IP}" -if [ -n "${PUBSUB}" ]; then - PUBSUB=--pubsub-topic="${PUBSUB}" +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" else - PUBSUB=--pubsub-topic="/waku/2/rs/66/0" + SHARD=--shard="0" fi if [ -n "${CLUSTER_ID}" ]; then @@ -59,5 +59,5 @@ exec /usr/bin/wakunode\ --metrics-server-port=8003\ --metrics-server-address=0.0.0.0\ --nat=extip:${IP}\ - ${PUBSUB}\ + ${SHARD}\ ${CLUSTER_ID} diff --git a/apps/liteprotocoltester/run_tester_node.sh b/apps/liteprotocoltester/run_tester_node.sh index 8975fba91..4a80ca460 100755 --- a/apps/liteprotocoltester/run_tester_node.sh +++ b/apps/liteprotocoltester/run_tester_node.sh @@ -93,10 +93,10 @@ else FULL_NODE=--bootstrap-node="${SERIVCE_NODE_ADDR}" fi -if [ -n "${PUBSUB}" ]; then - PUBSUB=--pubsub-topic="${PUBSUB}" +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" else - PUBSUB=--pubsub-topic="/waku/2/rs/66/0" + SHARD=--shard="0" fi if [ -n "${CONTENT_TOPIC}" ]; then @@ -128,19 +128,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" fi +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO +fi + echo "Running binary: ${BINARY_PATH}" echo "Tester node: ${FUNCTION}" echo "Using service node: ${SERIVCE_NODE_ADDR}" echo "My external IP: ${MY_EXT_IP}" exec "${BINARY_PATH}"\ - --log-level=INFO\ --nat=extip:${MY_EXT_IP}\ --test-peers\ + ${LOG_LEVEL}\ ${FULL_NODE}\ ${MESSAGE_INTERVAL_MILLIS}\ ${NUM_MESSAGES}\ - ${PUBSUB}\ + ${SHARD}\ ${CONTENT_TOPIC}\ ${CLUSTER_ID}\ ${FUNCTION}\ diff --git a/apps/liteprotocoltester/run_tester_node_at_infra.sh b/apps/liteprotocoltester/run_tester_node_at_infra.sh index 6cec4b006..e926875aa 100644 --- a/apps/liteprotocoltester/run_tester_node_at_infra.sh +++ b/apps/liteprotocoltester/run_tester_node_at_infra.sh @@ -48,10 +48,10 @@ fi MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org) -if [ -n "${PUBSUB}" ]; then - PUBSUB=--pubsub-topic="${PUBSUB}" +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" else - PUBSUB=--pubsub-topic="/waku/2/rs/66/0" + SHARD=--shard="0" fi if [ -n "${CONTENT_TOPIC}" ]; then @@ -83,19 
+83,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" fi +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO +fi + echo "Running binary: ${BINARY_PATH}" echo "Node function is: ${FUNCTION}" echo "Using service/bootstrap node as: ${NODE_ARG}" echo "My external IP: ${MY_EXT_IP}" exec "${BINARY_PATH}"\ - --log-level=INFO\ --nat=extip:${MY_EXT_IP}\ --test-peers\ + ${LOG_LEVEL}\ ${NODE_ARG}\ ${MESSAGE_INTERVAL_MILLIS}\ ${NUM_MESSAGES}\ - ${PUBSUB}\ + ${SHARD}\ ${CONTENT_TOPIC}\ ${CLUSTER_ID}\ ${FUNCTION}\ diff --git a/apps/liteprotocoltester/run_tester_node_on_fleet.sh b/apps/liteprotocoltester/run_tester_node_on_fleet.sh index f0300cef2..538a890e6 100644 --- a/apps/liteprotocoltester/run_tester_node_on_fleet.sh +++ b/apps/liteprotocoltester/run_tester_node_on_fleet.sh @@ -48,10 +48,10 @@ fi MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org) -if [ -n "${PUBSUB}" ]; then - PUBSUB=--pubsub-topic="${PUBSUB}" +if [ -n "${SHARD}" ]; then + SHARD=--shard=${SHARD} else - PUBSUB=--pubsub-topic="/waku/2/rs/66/0" + SHARD=--shard=0 fi if [ -n "${CONTENT_TOPIC}" ]; then @@ -79,8 +79,14 @@ if [ -n "${NUM_MESSAGES}" ]; then NUM_MESSAGES=--num-messages="${NUM_MESSAGES}" fi -if [ -n "${DELAY_MESSAGES}" ]; then - DELAY_MESSAGES=--delay-messages="${DELAY_MESSAGES}" +if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then + MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" +fi + +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO fi echo "Running binary: ${BINARY_PATH}" @@ -89,12 +95,12 @@ echo "Using service/bootstrap node as: ${NODE_ARG}" echo "My external IP: ${MY_EXT_IP}" exec "${BINARY_PATH}"\ - --log-level=INFO\ --nat=extip:${MY_EXT_IP}\ + ${LOG_LEVEL}\ ${NODE_ARG}\ - ${DELAY_MESSAGES}\ + ${MESSAGE_INTERVAL_MILLIS}\ ${NUM_MESSAGES}\ - ${PUBSUB}\ + ${SHARD}\ ${CONTENT_TOPIC}\ ${CLUSTER_ID}\ ${FUNCTION}\ diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim index 83216ae3b..a303c3c58 100644 --- a/apps/liteprotocoltester/service_peer_management.nim +++ b/apps/liteprotocoltester/service_peer_management.nim @@ -189,14 +189,14 @@ proc pxLookupServiceNode*( if conf.testPeers: let peersOpt = - await tryCallAllPxPeers(node.peerManager, codec, conf.pubsubTopics[0]) + await tryCallAllPxPeers(node.peerManager, codec, conf.getPubsubTopic()) if peersOpt.isSome(): info "Found service peers for codec", codec = codec, peer_count = peersOpt.get().len() return ok(peersOpt.get().len > 0) else: let peerOpt = - await selectRandomCapablePeer(node.peerManager, codec, conf.pubsubTopics[0]) + await selectRandomCapablePeer(node.peerManager, codec, conf.getPubsubTopic()) if peerOpt.isSome(): info "Found service peer for codec", codec = codec, peer = peerOpt.get() return ok(true) diff --git a/apps/liteprotocoltester/tester_config.nim b/apps/liteprotocoltester/tester_config.nim index 115686be3..eccaafc06 100644 --- a/apps/liteprotocoltester/tester_config.nim +++ b/apps/liteprotocoltester/tester_config.nim @@ -18,6 +18,7 @@ import common/logging, factory/external_config, waku_core, + waku_core/topics/pubsub_topic, ] export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet @@ -95,18 +96,9 @@ type LiteProtocolTesterConf* = object name: "message-interval" .}: uint32 - pubsubTopics* {. - desc: "Default pubsub topic to subscribe to. 
Argument may be repeated.", - defaultValue: @[LitePubsubTopic], - name: "pubsub-topic" - .}: seq[PubsubTopic] + shard* {.desc: "Shards index to subscribe to. ", defaultValue: 0, name: "shard".}: + uint16 - ## TODO: extend lite protocol tester configuration based on testing needs - # shards* {. - # desc: "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", - # defaultValue: @[], - # name: "shard" - # .}: seq[uint16] contentTopics* {. desc: "Default content topic to subscribe to. Argument may be repeated.", defaultValue: @[LiteContentTopic], @@ -195,4 +187,7 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] = except CatchableError: err(getCurrentExceptionMsg()) +proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic = + return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard) + {.pop.} diff --git a/waku/common/rate_limit/request_limiter.nim b/waku/common/rate_limit/request_limiter.nim index 7f33d0348..0ede20be4 100644 --- a/waku/common/rate_limit/request_limiter.nim +++ b/waku/common/rate_limit/request_limiter.nim @@ -78,14 +78,14 @@ template checkUsageLimit*( bodyWithinLimit, bodyRejected: untyped, ) = if t.checkUsage(proto, conn): - let requestStartTime = getTime().toUnixFloat() + let requestStartTime = Moment.now() waku_service_requests.inc(labelValues = [proto, "served"]) bodyWithinLimit - let requestDurationSec = getTime().toUnixFloat() - requestStartTime + let requestDuration = Moment.now() - requestStartTime waku_service_request_handling_duration_seconds.observe( - requestDurationSec, labelValues = [proto] + requestDuration.milliseconds.float / 1000, labelValues = [proto] ) else: waku_service_requests.inc(labelValues = [proto, "rejected"]) diff --git a/waku/common/rate_limit/service_metrics.nim b/waku/common/rate_limit/service_metrics.nim index 7d24d9530..bff91f622 100644 --- a/waku/common/rate_limit/service_metrics.nim +++ b/waku/common/rate_limit/service_metrics.nim @@ -1,8 +1,11 @@ {.push raises: [].} import std/options +import chronos/timer import metrics, setting +export metrics + declarePublicGauge waku_service_requests_limit, "Applied rate limit of non-relay service", ["service"] @@ -19,4 +22,9 @@ proc setServiceLimitMetric*(service: string, limit: Option[RateLimitSetting]) = ) declarePublicHistogram waku_service_request_handling_duration_seconds, - "duration of non-relay service handling", ["service"] + "duration of non-relay service handling", + labels = ["service"], + buckets = [ + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, + 15.0, 20.0, 30.0, Inf, + ] diff --git a/waku/common/rate_limit/single_token_limiter.nim b/waku/common/rate_limit/single_token_limiter.nim index da01f61bb..50fb2d64c 100644 --- a/waku/common/rate_limit/single_token_limiter.nim +++ b/waku/common/rate_limit/single_token_limiter.nim @@ -45,14 +45,14 @@ template checkUsageLimit*( bodyWithinLimit, bodyRejected: untyped, ) = if t.checkUsage(proto): - let requestStartTime = getTime().toUnixFloat() + let requestStartTime = Moment.now() waku_service_requests.inc(labelValues = [proto, "served"]) bodyWithinLimit - let requestDurationSec = getTime().toUnixFloat() - requestStartTime + let requestDuration = Moment.now() - requestStartTime waku_service_request_handling_duration_seconds.observe( - requestDurationSec, labelValues = [proto] + requestDuration.milliseconds.float / 1000, labelValues = [proto] ) else: waku_service_requests.inc(labelValues = [proto, "rejected"]) diff --git 
a/waku/node/waku_node.nim b/waku/node/waku_node.nim index 18986d5c0..a544bdc80 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -513,6 +513,10 @@ proc mountFilterClient*(node: WakuNode) {.async: (raises: []).} = ## rely on node provided cache. - This only applies for v2 filter client info "mounting filter client" + if not node.wakuFilterClient.isNil(): + trace "Filter client already mounted." + return + node.wakuFilterClient = WakuFilterClient.new(node.peerManager, node.rng) try: @@ -1021,8 +1025,9 @@ proc mountLegacyLightPush*( proc mountLegacyLightPushClient*(node: WakuNode) = info "mounting legacy light push client" - node.wakuLegacyLightpushClient = - WakuLegacyLightPushClient.new(node.peerManager, node.rng) + if node.wakuLegacyLightpushClient.isNil(): + node.wakuLegacyLightpushClient = + WakuLegacyLightPushClient.new(node.peerManager, node.rng) proc legacyLightpushPublish*( node: WakuNode, @@ -1133,7 +1138,8 @@ proc mountLightPush*( proc mountLightPushClient*(node: WakuNode) = info "mounting light push client" - node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng) + if node.wakuLightpushClient.isNil(): + node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng) proc lightpushPublishHandler( node: WakuNode, diff --git a/waku/waku_filter_v2/protocol_metrics.nim b/waku/waku_filter_v2/protocol_metrics.nim index b19f612f3..2d9f63c63 100644 --- a/waku/waku_filter_v2/protocol_metrics.nim +++ b/waku/waku_filter_v2/protocol_metrics.nim @@ -11,7 +11,11 @@ declarePublicGauge waku_filter_subscriptions, "number of subscribed filter clien declarePublicHistogram waku_filter_request_duration_seconds, "duration of filter subscribe requests", ["type"] declarePublicHistogram waku_filter_handle_message_duration_seconds, - "duration to push message to filter subscribers" + "duration to push message to filter subscribers", + buckets = [ + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, + 15.0, 20.0, 30.0, Inf, + ] # Error types (metric label values) const diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index e2b096bc9..57a95e107 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -75,12 +75,13 @@ proc handleRequest*( waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"]) + let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex() notice "handling lightpush request", my_peer_id = wl.peerManager.switch.peerInfo.peerId, peer_id = peerId, requestId = pushRequest.requestId, pubsubTopic = pushRequest.pubsubTopic, - msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex(), + msg_hash = msg_hash, receivedTime = getNowInNanosecondTime() let handleRes = await wl.pushHandler(peerId, pubsubTopic, pushRequest.message) diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim index feb6a1320..5de25ead9 100644 --- a/waku/waku_lightpush_legacy/protocol.nim +++ b/waku/waku_lightpush_legacy/protocol.nim @@ -42,12 +42,14 @@ proc handleRequest*( pubSubTopic = request.get().pubSubTopic message = request.get().message + let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex() waku_lightpush_messages.inc(labelValues = ["PushRequest"]) + notice "handling lightpush request", peer_id = peerId, requestId = requestId, pubsubTopic = pubsubTopic, - msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), + msg_hash = msg_hash, receivedTime = getNowInNanosecondTime() let 
handleRes = await wl.pushHandler(peerId, pubsubTopic, message) From 5ae526ce4fd9c851b5d934879b718aae80317adc Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 17 Apr 2025 13:03:56 +0200 Subject: [PATCH 24/48] chore: Timestamp now in publish (#3373) * Ensure timestamp is always set in WakuMessage when publishing --- tests/waku_lightpush/lightpush_utils.nim | 2 +- tests/waku_lightpush/test_client.nim | 40 ++++++++++++++++++- tests/waku_relay/test_wakunode_relay.nim | 6 +++ .../test_wakunode_rln_relay.nim | 4 +- waku/waku_lightpush/client.nim | 12 +++++- waku/waku_lightpush_legacy/client.nim | 7 +++- waku/waku_relay/protocol.nim | 6 ++- 7 files changed, 69 insertions(+), 8 deletions(-) diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim index b334f043a..9b867c707 100644 --- a/tests/waku_lightpush/lightpush_utils.nim +++ b/tests/waku_lightpush/lightpush_utils.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, chronos, libp2p/crypto/crypto +import std/options, chronos, chronicles, libp2p/crypto/crypto import waku/node/peer_manager, diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim index d1a7ba57e..d7a1b6b24 100644 --- a/tests/waku_lightpush/test_client.nim +++ b/tests/waku_lightpush/test_client.nim @@ -1,6 +1,11 @@ {.used.} -import std/[options, strscans], testutils/unittests, chronos, libp2p/crypto/crypto +import + std/[options, strscans], + testutils/unittests, + chronos, + chronicles, + libp2p/crypto/crypto import waku/[ @@ -307,6 +312,39 @@ suite "Waku Lightpush Client": # Cleanup await serverSwitch2.stop() + asyncTest "Check timestamp is not zero": + ## This test validates that, even the generated message has a timestamp of 0, + ## the node will eventually set a timestamp when publishing the message. 
+ let + zeroTimestamp = 0 + meta = "TEST-META" + message = fakeWakuMessage( + payloads.ALPHABETIC, content_topics.CURRENT, meta, zeroTimestamp + ) + + # When publishing a valid payload + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + let (readPubsubTopic, readMessage) = handlerFuture.read() + + check: + pubsubTopic == readPubsubTopic + message.payload == readMessage.payload + message.contentTopic == readMessage.contentTopic + message.meta == readMessage.meta + message.timestamp != readMessage.timestamp + message.ephemeral == readMessage.ephemeral + message.proof == readMessage.proof + message.version == readMessage.version + readMessage.timestamp > 0 + suite "Verification of PushResponse Payload": asyncTest "Positive Responses": # When sending a valid PushRequest diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim index 398470163..8e028acdc 100644 --- a/tests/waku_relay/test_wakunode_relay.nim +++ b/tests/waku_relay/test_wakunode_relay.nim @@ -90,6 +90,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) @@ -279,6 +280,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) @@ -327,6 +329,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) @@ -379,6 +382,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) @@ -429,6 +433,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) @@ -487,6 +492,7 @@ suite "WakuNode - Relay": topic == $shard msg.contentTopic == contentTopic msg.payload == payload + msg.timestamp > 0 completionFut.complete(true) node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index b07cca408..2a0fd5779 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -451,9 +451,9 @@ procSuite "WakuNode - RLN relay": completionFut1.complete(true) if msg == wm2: completionFut2.complete(true) - if msg == wm3: + if msg.payload == wm3.payload: completionFut3.complete(true) - if msg == wm4: + if msg.payload == wm4.payload: completionFut4.complete(true) # mount the relay handler for node3 diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim index 7aa2d9fa9..2f03b0847 100644 --- a/waku/waku_lightpush/client.nim +++ b/waku/waku_lightpush/client.nim @@ -65,9 +65,13 @@ proc sendPushRequest( proc publish*( wl: 
WakuLightPushClient, pubSubTopic: Option[PubsubTopic] = none(PubsubTopic), - message: WakuMessage, + wakuMessage: WakuMessage, peer: PeerId | RemotePeerInfo, ): Future[WakuLightPushResult] {.async, gcsafe.} = + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + when peer is PeerId: info "publish", peerId = shortLog(peer), @@ -88,11 +92,15 @@ proc publish*( return lightpushSuccessResult(publishedCount) proc publishToAny*( - wl: WakuLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage + wl: WakuLightPushClient, pubSubTopic: PubsubTopic, wakuMessage: WakuMessage ): Future[WakuLightPushResult] {.async, gcsafe.} = ## This proc is similar to the publish one but in this case ## we don't specify a particular peer and instead we get it from peer manager + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr: diff --git a/waku/waku_lightpush_legacy/client.nim b/waku/waku_lightpush_legacy/client.nim index c3b4a158e..503cbe1eb 100644 --- a/waku/waku_lightpush_legacy/client.nim +++ b/waku/waku_lightpush_legacy/client.nim @@ -72,10 +72,15 @@ proc sendPushRequest( proc publish*( wl: WakuLegacyLightPushClient, pubSubTopic: PubsubTopic, - message: WakuMessage, + wakuMessage: WakuMessage, peer: RemotePeerInfo, ): Future[WakuLightPushResult[string]] {.async, gcsafe.} = ## On success, returns the msg_hash of the published message + + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + let msg_hash_hex_str = computeMessageHash(pubsubTopic, message).to0xHex() let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) ?await wl.sendPushRequest(pushRequest, peer) diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 126ff608c..1698fac70 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -533,11 +533,15 @@ proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: TopicHandler) procCall GossipSub(w).unsubscribe(pubsubTopic, handler) proc publish*( - w: WakuRelay, pubsubTopic: PubsubTopic, message: WakuMessage + w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage ): Future[Result[int, PublishOutcome]] {.async.} = if pubsubTopic.isEmptyOrWhitespace(): return err(NoTopicSpecified) + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + let data = message.encode().buffer let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() From 559776557b9ac13f6f018ccbd3a1bf1acae58906 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Fri, 18 Apr 2025 00:15:35 +0300 Subject: [PATCH 25/48] fix: libwaku's redundant allocs (#3380) --- library/libwaku.nim | 77 ++++++++------------------------------------- 1 file changed, 13 insertions(+), 64 deletions(-) diff --git a/library/libwaku.nim b/library/libwaku.nim index 050395bc5..48df3e2c6 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -225,19 +225,11 @@ proc waku_content_topic( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let appStr = appName.alloc() - let ctnStr = contentTopicName.alloc() - let encodingStr = encoding.alloc() - - let contentTopic = fmt"/{$appStr}/{appVersion}/{$ctnStr}/{$encodingStr}" + let contentTopic = 
fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}" callback( RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData ) - deallocShared(appStr) - deallocShared(ctnStr) - deallocShared(encodingStr) - return RET_OK proc waku_pubsub_topic( @@ -248,15 +240,11 @@ proc waku_pubsub_topic( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let topicNameStr = topicName.alloc() - - let outPubsubTopic = fmt"/waku/2/{$topicNameStr}" + let outPubsubTopic = fmt"/waku/2/{$topicName}" callback( RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData ) - deallocShared(topicNameStr) - return RET_OK proc waku_default_pubsub_topic( @@ -289,12 +277,9 @@ proc waku_relay_publish( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let jwm = jsonWakuMessage.alloc() - defer: - deallocShared(jwm) var jsonMessage: JsonMessage try: - let jsonContent = parseJson($jwm) + let jsonContent = parseJson($jsonWakuMessage) jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: raise newException(JsonParsingError, $error) except JsonParsingError: @@ -307,14 +292,10 @@ proc waku_relay_publish( callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return RET_ERR - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.PUBLISH, pst, nil, wakuMessage), + RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage), callback, userData, ) @@ -354,15 +335,12 @@ proc waku_relay_subscribe( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) var cb = onReceivedMessage(ctx) handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pst, WakuRelayHandler(cb)), + RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)), callback, userData, ) @@ -377,9 +355,6 @@ proc waku_relay_add_protected_shard( ): cint {.dynlib, exportc, cdecl.} = initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pubk = publicKey.alloc() - defer: - deallocShared(pubk) handleRequest( ctx, @@ -388,7 +363,7 @@ proc waku_relay_add_protected_shard( RelayMsgType.ADD_PROTECTED_SHARD, clusterId = clusterId, shardId = shardId, - publicKey = pubk, + publicKey = publicKey, ), callback, userData, @@ -403,15 +378,11 @@ proc waku_relay_unsubscribe( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, RelayRequest.createShared( - RelayMsgType.UNSUBSCRIBE, pst, WakuRelayHandler(onReceivedMessage(ctx)) + RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx)) ), callback, userData, @@ -426,14 +397,10 @@ proc waku_relay_get_num_connected_peers( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pst), + RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic), callback, userData, ) @@ -447,14 +414,10 @@ proc waku_relay_get_connected_peers( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pst), + 
RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic), callback, userData, ) @@ -468,14 +431,10 @@ proc waku_relay_get_num_peers_in_mesh( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pst), + RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic), callback, userData, ) @@ -489,14 +448,10 @@ proc waku_relay_get_peers_in_mesh( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let pst = pubSubTopic.alloc() - defer: - deallocShared(pst) - handleRequest( ctx, RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pst), + RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic), callback, userData, ) @@ -566,15 +521,9 @@ proc waku_lightpush_publish( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - let jwm = jsonWakuMessage.alloc() - let pst = pubSubTopic.alloc() - defer: - deallocShared(jwm) - deallocShared(pst) - var jsonMessage: JsonMessage try: - let jsonContent = parseJson($jwm) + let jsonContent = parseJson($jsonWakuMessage) jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: raise newException(JsonParsingError, $error) except JsonParsingError: @@ -590,7 +539,7 @@ proc waku_lightpush_publish( handleRequest( ctx, RequestType.LIGHTPUSH, - LightpushRequest.createShared(LightpushMsgType.PUBLISH, pst, wakuMessage), + LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage), callback, userData, ) From 8dd31c200b16545f67693f20ea0c52916857893d Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Fri, 18 Apr 2025 00:16:35 +0300 Subject: [PATCH 26/48] fix: mistaken comments and broken link (#3381) --- library/events/json_connection_change_event.nim | 3 --- library/events/json_message_event.nim | 2 +- library/events/json_topic_health_change_event.nim | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/library/events/json_connection_change_event.nim b/library/events/json_connection_change_event.nim index 1a00237b6..ff2823640 100644 --- a/library/events/json_connection_change_event.nim +++ b/library/events/json_connection_change_event.nim @@ -9,9 +9,6 @@ type JsonConnectionChangeEvent* = ref object of JsonEvent proc new*( T: type JsonConnectionChangeEvent, peerId: string, peerEvent: PeerEventKind ): T = - # Returns a JsonConnectionChangeEvent event as indicated in - # https://rfc.vac.dev/spec/36/#jsonmessageevent-type - return JsonConnectionChangeEvent( eventType: "connection_change", peerId: peerId, peerEvent: peerEvent ) diff --git a/library/events/json_message_event.nim b/library/events/json_message_event.nim index 6f9dafa9f..f79fef86f 100644 --- a/library/events/json_message_event.nim +++ b/library/events/json_message_event.nim @@ -71,7 +71,7 @@ type JsonMessageEvent* = ref object of JsonEvent proc new*(T: type JsonMessageEvent, pubSubTopic: string, msg: WakuMessage): T = # Returns a WakuMessage event as indicated in - # https://rfc.vac.dev/spec/36/#jsonmessageevent-type + # https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonmessageevent-type var payload = newSeq[byte](len(msg.payload)) if len(msg.payload) != 0: diff --git a/library/events/json_topic_health_change_event.nim b/library/events/json_topic_health_change_event.nim index c735eccbf..c194e890c 100644 --- 
a/library/events/json_topic_health_change_event.nim +++ b/library/events/json_topic_health_change_event.nim @@ -10,9 +10,6 @@ type JsonTopicHealthChangeEvent* = ref object of JsonEvent proc new*( T: type JsonTopicHealthChangeEvent, pubsubTopic: string, topicHealth: TopicHealth ): T = - # Returns a TopicHealthChange event as indicated in - # https://rfc.vac.dev/spec/36/#jsonmessageevent-type - return JsonTopicHealthChangeEvent( eventType: "relay_topic_health_change", pubsubTopic: pubsubTopic, From 2f49aae2b73860e3de936f77b66129cd0a3c26c4 Mon Sep 17 00:00:00 2001 From: Simon-Pierre Vivier Date: Tue, 22 Apr 2025 08:37:11 -0400 Subject: [PATCH 27/48] feat: Waku Sync dashboard new panel & update (#3379) --- metrics/waku-fleet-dashboard.json | 816 ++++++++++++++------- waku/waku_store_sync/protocols_metrics.nim | 6 +- waku/waku_store_sync/reconciliation.nim | 10 +- waku/waku_store_sync/transfer.nim | 4 +- 4 files changed, 570 insertions(+), 266 deletions(-) diff --git a/metrics/waku-fleet-dashboard.json b/metrics/waku-fleet-dashboard.json index ad9ef040a..230fcc8d0 100644 --- a/metrics/waku-fleet-dashboard.json +++ b/metrics/waku-fleet-dashboard.json @@ -55,7 +55,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -139,7 +140,8 @@ "mode": "absolute", "steps": [ { - "color": "blue" + "color": "blue", + "value": null } ] }, @@ -209,7 +211,8 @@ "mode": "percentage", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -292,7 +295,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -310,7 +314,7 @@ "properties": [ { "id": "custom.width", - "value": 122 + "value": 166 } ] }, @@ -430,7 +434,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -445,7 +450,7 @@ "h": 9, "w": 12, "x": 0, - "y": 145 + "y": 1312 }, "id": 81, "options": { @@ -526,7 +531,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -541,7 +547,7 @@ "h": 9, "w": 12, "x": 12, - "y": 145 + "y": 1312 }, "id": 82, "options": { @@ -624,7 +630,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -640,7 +647,7 @@ "h": 9, "w": 12, "x": 0, - "y": 154 + "y": 1321 }, "id": 78, "interval": "15s", @@ -727,7 +734,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -743,7 +751,7 @@ "h": 9, "w": 12, "x": 12, - "y": 154 + "y": 1321 }, "id": 79, "options": { @@ -827,7 +835,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -843,7 +852,7 @@ "h": 10, "w": 12, "x": 0, - "y": 192 + "y": 1330 }, "id": 124, "options": { @@ -931,7 +940,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -947,7 +957,7 @@ "h": 10, "w": 12, "x": 12, - "y": 192 + "y": 1330 }, "id": 126, "options": { @@ -982,6 +992,204 @@ "title": "Relay traffic per shard (out) - average of all peers", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 1340 + }, + "id": 169, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg by (protocol)(waku_connected_peers{direction=\"In\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "instant": false, + "legendFormat": "{{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "# peers per protocol (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 1340 + }, + "id": 170, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg by (protocol)(waku_connected_peers{direction=\"Out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "instant": false, + "legendFormat": "{{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "# peers per protocol (out)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -1032,7 +1240,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1048,7 +1257,7 @@ "h": 9, "w": 12, "x": 0, - "y": 202 + "y": 1350 }, "id": 11, "options": { @@ -1131,7 +1340,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null 
}, { "color": "red", @@ -1146,7 +1356,7 @@ "h": 9, "w": 12, "x": 12, - "y": 202 + "y": 1350 }, "id": 54, "options": { @@ -1229,7 +1439,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1244,7 +1455,7 @@ "h": 8, "w": 12, "x": 0, - "y": 211 + "y": 1359 }, "id": 66, "options": { @@ -1325,7 +1536,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1340,7 +1552,7 @@ "h": 9, "w": 12, "x": 12, - "y": 211 + "y": 1359 }, "id": 122, "options": { @@ -1454,7 +1666,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1463,38 +1676,13 @@ ] } }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "store-02.ac-cn-hongkong-c.status.staging:v0.34.0-rc.1" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 220 + "y": 1368 }, "id": 68, "options": { @@ -1592,7 +1780,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1607,7 +1796,7 @@ "h": 6, "w": 12, "x": 0, - "y": 396 + "y": 2 }, "id": 48, "options": { @@ -1688,7 +1877,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1703,7 +1893,7 @@ "h": 6, "w": 12, "x": 12, - "y": 396 + "y": 2 }, "id": 50, "options": { @@ -1784,7 +1974,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1799,7 +1990,7 @@ "h": 6, "w": 12, "x": 0, - "y": 402 + "y": 343 }, "id": 60, "options": { @@ -1905,7 +2096,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -1920,7 +2112,7 @@ "h": 6, "w": 12, "x": 12, - "y": 402 + "y": 343 }, "id": 8, "options": { @@ -2004,7 +2196,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2019,7 +2212,7 @@ "h": 6, "w": 12, "x": 0, - "y": 408 + "y": 349 }, "id": 2, "options": { @@ -2106,7 +2299,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2122,7 +2316,7 @@ "h": 6, "w": 12, "x": 12, - "y": 408 + "y": 349 }, "id": 83, "options": { @@ -2205,7 +2399,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2220,7 +2415,7 @@ "h": 6, "w": 12, "x": 0, - "y": 414 + "y": 355 }, "id": 3, "options": { @@ -2304,7 +2499,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2319,7 +2515,7 @@ "h": 6, "w": 12, "x": 12, - "y": 414 + "y": 355 }, "id": 9, "options": { @@ -2429,7 +2625,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2444,7 +2641,7 @@ "h": 6, "w": 12, "x": 0, - "y": 420 + "y": 361 }, "id": 6, "options": { @@ -2526,7 +2723,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2541,7 +2739,7 @@ "h": 6, "w": 12, "x": 12, - "y": 420 + "y": 361 }, "id": 7, "options": { @@ -2650,7 +2848,8 @@ "mode": "absolute", "steps": [ { - 
"color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2666,7 +2865,7 @@ "h": 8, "w": 12, "x": 0, - "y": 426 + "y": 367 }, "id": 44, "options": { @@ -2772,7 +2971,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2788,7 +2988,7 @@ "h": 6, "w": 12, "x": 12, - "y": 426 + "y": 367 }, "id": 10, "options": { @@ -2871,7 +3071,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2881,38 +3082,13 @@ }, "unit": "decbytes" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "boot-01.ac-cn-hongkong-c.status.staging seq[byte]" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 432 + "y": 373 }, "id": 64, "options": { @@ -2999,7 +3175,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3014,7 +3191,7 @@ "h": 6, "w": 12, "x": 0, - "y": 434 + "y": 375 }, "id": 4, "options": { @@ -3096,7 +3273,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3111,7 +3289,7 @@ "h": 6, "w": 12, "x": 12, - "y": 440 + "y": 381 }, "id": 5, "options": { @@ -3207,7 +3385,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3222,7 +3401,7 @@ "h": 8, "w": 8, "x": 0, - "y": 447 + "y": 3 }, "id": 159, "options": { @@ -3246,7 +3425,7 @@ "uid": "P6693426190CB2316" }, "editorMode": "code", - "expr": "waku_rln_proofs_generated_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "expr": "waku_rln_total_generated_proofs", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -3303,7 +3482,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3318,7 +3498,7 @@ "h": 8, "w": 8, "x": 8, - "y": 447 + "y": 3 }, "id": 117, "options": { @@ -3400,7 +3580,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3415,7 +3596,7 @@ "h": 8, "w": 8, "x": 16, - "y": 447 + "y": 3 }, "id": 160, "options": { @@ -3439,7 +3620,7 @@ "uid": "P6693426190CB2316" }, "editorMode": "code", - "expr": "waku_rln_proofs_remaining{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "expr": "waku_rln_remaining_proofs_per_epoch", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -3496,7 +3677,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3512,7 +3694,7 @@ "h": 8, "w": 12, "x": 0, - "y": 455 + "y": 306 }, "id": 119, "options": { @@ -3593,7 +3775,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3608,7 +3791,7 @@ "h": 8, "w": 12, "x": 12, - "y": 455 + "y": 306 }, "id": 121, "options": { @@ -3689,7 +3872,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3704,7 +3888,7 @@ "h": 8, "w": 8, "x": 0, - "y": 463 + "y": 314 }, "id": 113, "options": { @@ -3786,7 +3970,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { 
"color": "red", @@ -3801,7 +3986,7 @@ "h": 8, "w": 8, "x": 8, - "y": 463 + "y": 314 }, "id": 115, "options": { @@ -3908,7 +4093,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3923,7 +4109,7 @@ "h": 6, "w": 12, "x": 0, - "y": 472 + "y": 4 }, "id": 36, "options": { @@ -4004,7 +4190,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4019,7 +4206,7 @@ "h": 6, "w": 12, "x": 12, - "y": 472 + "y": 4 }, "id": 38, "options": { @@ -4115,7 +4302,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4130,7 +4318,7 @@ "h": 6, "w": 12, "x": 0, - "y": 478 + "y": 212 }, "id": 62, "options": { @@ -4213,7 +4401,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4228,7 +4417,7 @@ "h": 6, "w": 12, "x": 12, - "y": 478 + "y": 212 }, "id": 40, "options": { @@ -4329,7 +4518,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4341,7 +4531,7 @@ "h": 11, "w": 12, "x": 0, - "y": 484 + "y": 218 }, "id": 144, "options": { @@ -4433,7 +4623,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4445,7 +4636,7 @@ "h": 11, "w": 12, "x": 12, - "y": 484 + "y": 218 }, "id": 145, "options": { @@ -4538,7 +4729,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4550,7 +4742,7 @@ "h": 8, "w": 12, "x": 0, - "y": 495 + "y": 229 }, "id": 146, "options": { @@ -4642,7 +4834,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4654,7 +4847,7 @@ "h": 8, "w": 12, "x": 12, - "y": 495 + "y": 229 }, "id": 148, "options": { @@ -4746,7 +4939,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4758,7 +4952,7 @@ "h": 8, "w": 12, "x": 0, - "y": 503 + "y": 237 }, "id": 158, "options": { @@ -4849,7 +5043,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4861,7 +5056,7 @@ "h": 8, "w": 12, "x": 12, - "y": 503 + "y": 237 }, "id": 157, "options": { @@ -4951,7 +5146,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -4963,7 +5159,7 @@ "h": 8, "w": 12, "x": 0, - "y": 511 + "y": 245 }, "id": 149, "options": { @@ -5049,7 +5245,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -5086,7 +5283,7 @@ "h": 8, "w": 12, "x": 12, - "y": 511 + "y": 245 }, "id": 147, "options": { @@ -5148,7 +5345,7 @@ "h": 7, "w": 12, "x": 0, - "y": 519 + "y": 253 }, "id": 77, "maxDataPoints": 60, @@ -5247,7 +5444,7 @@ "h": 7, "w": 12, "x": 12, - "y": 519 + "y": 253 }, "id": 75, "maxDataPoints": 60, @@ -5393,7 +5590,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5409,7 +5607,7 @@ "h": 13, "w": 12, "x": 0, - "y": 526 + "y": 260 }, "id": 142, "options": { @@ -5494,7 +5692,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5510,7 +5709,7 @@ "h": 13, "w": 12, "x": 12, - "y": 526 + "y": 260 }, "id": 130, "options": { @@ -5558,11 +5757,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", 
"axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -5571,6 +5772,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5592,7 +5794,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5608,7 +5811,7 @@ "h": 13, "w": 12, "x": 0, - "y": 539 + "y": 273 }, "id": 132, "options": { @@ -5624,10 +5827,12 @@ "sortDesc": false }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -5656,11 +5861,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -5669,6 +5876,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5690,7 +5898,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5706,7 +5915,7 @@ "h": 13, "w": 12, "x": 12, - "y": 539 + "y": 273 }, "id": 143, "options": { @@ -5722,10 +5931,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -5754,11 +5965,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -5767,6 +5980,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5788,7 +6002,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5804,7 +6019,7 @@ "h": 13, "w": 12, "x": 0, - "y": 552 + "y": 286 }, "id": 128, "options": { @@ -5820,10 +6035,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -5852,11 +6069,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -5865,6 +6084,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -5886,7 +6106,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5902,7 +6123,7 @@ "h": 13, "w": 12, "x": 12, - "y": 552 + "y": 286 }, "id": 141, "options": { @@ -5918,10 +6139,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -6006,14 +6229,10 @@ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] }, - "unit": "binBps" + "unit": "deckbytes" }, "overrides": [] }, @@ -6046,7 +6265,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(direction, protocol) (rate(total_bytes_exchanged_sum[$__rate_interval]))", + "expr": 
"sum by(direction, protocol) (total_bytes_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", "fullMetaSearch": false, "includeNullMetadata": false, "legendFormat": "__auto", @@ -6055,7 +6274,7 @@ "useBackend": false } ], - "title": "Bytes Exchanged Rate", + "title": "Bytes Exchanged", "type": "timeseries" }, { @@ -6063,7 +6282,7 @@ "type": "prometheus", "uid": "P6693426190CB2316" }, - "description": "the number of messages sent and received by the transfer protocol per second.", + "description": "the number of messages sent and received by the transfer protocol.", "fieldConfig": { "defaults": { "color": { @@ -6109,10 +6328,6 @@ { "color": "green", "value": null - }, - { - "color": "red", - "value": 80 } ] } @@ -6148,7 +6363,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum by(direction) (rate(total_transfer_messages_exchanged_total[$__rate_interval]))", + "expr": "sum by(direction) (total_transfer_messages_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", "fullMetaSearch": false, "includeNullMetadata": true, "legendFormat": "__auto", @@ -6157,7 +6372,7 @@ "useBackend": false } ], - "title": "Messages Exchanged Rate", + "title": "Messages Exchanged", "type": "timeseries" }, { @@ -6225,7 +6440,7 @@ "disableTextWrap": false, "editorMode": "builder", "exemplar": false, - "expr": "sum by(le) (reconciliation_roundtrips_bucket)", + "expr": "sum by(le) (reconciliation_roundtrips_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", "format": "heatmap", "fullMetaSearch": false, "hide": false, @@ -6240,6 +6455,78 @@ "title": "Distribution of Round-Trips per Reconciliation", "type": "bargauge" }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 171, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "left", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "hidden" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(le) (reconciliation_differences_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "format": "heatmap", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Distribution of differences per reconciliation.", + "type": "bargauge" + }, { "collapsed": true, "gridPos": { @@ -6298,7 +6585,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6313,7 +6601,7 @@ "h": 8, "w": 8, "x": 0, - "y": 156 + "y": 51 }, "id": 93, "options": { @@ -6397,7 +6685,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6412,7 +6701,7 @@ "h": 8, "w": 8, "x": 8, - "y": 156 + "y": 51 }, "id": 89, "options": { @@ -6493,7 +6782,8 @@ "mode": "absolute", "steps": [ { - "color": "green" 
+ "color": "green", + "value": null }, { "color": "red", @@ -6508,7 +6798,7 @@ "h": 8, "w": 8, "x": 16, - "y": 156 + "y": 51 }, "id": 91, "options": { @@ -6565,7 +6855,7 @@ "h": 8, "w": 12, "x": 0, - "y": 164 + "y": 59 }, "id": 95, "options": { @@ -6647,7 +6937,7 @@ "h": 8, "w": 12, "x": 12, - "y": 164 + "y": 59 }, "id": 97, "options": { @@ -6753,7 +7043,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6763,39 +7054,13 @@ }, "unit": "reqps" }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "boot-01.ac-cn-hongkong-c.status.prod - rejected", - "boot-01.ac-cn-hongkong-c.status.prod - served" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 13, "w": 12, "x": 0, - "y": 172 + "y": 67 }, "id": 134, "options": { @@ -6882,7 +7147,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -6898,7 +7164,7 @@ "h": 13, "w": 12, "x": 12, - "y": 172 + "y": 67 }, "id": 136, "options": { @@ -6996,7 +7262,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7011,7 +7278,7 @@ "h": 8, "w": 12, "x": 0, - "y": 82 + "y": 126 }, "id": 30, "options": { @@ -7092,7 +7359,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7107,7 +7375,7 @@ "h": 8, "w": 12, "x": 12, - "y": 82 + "y": 126 }, "id": 32, "options": { @@ -7189,7 +7457,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7205,7 +7474,7 @@ "h": 12, "w": 12, "x": 0, - "y": 129 + "y": 134 }, "id": 138, "options": { @@ -7292,7 +7561,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7308,7 +7578,7 @@ "h": 12, "w": 12, "x": 12, - "y": 129 + "y": 134 }, "id": 140, "options": { @@ -7371,11 +7641,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -7384,6 +7656,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -7405,7 +7678,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7421,7 +7695,7 @@ "h": 12, "w": 12, "x": 0, - "y": 760 + "y": 147 }, "id": 153, "options": { @@ -7437,10 +7711,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -7469,11 +7745,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -7482,6 +7760,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -7503,7 +7782,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + 
"color": "green", + "value": null }, { "color": "red", @@ -7519,7 +7799,7 @@ "h": 12, "w": 12, "x": 12, - "y": 760 + "y": 147 }, "id": 154, "options": { @@ -7535,10 +7815,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -7571,7 +7853,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7587,13 +7870,21 @@ "h": 12, "w": 12, "x": 0, - "y": 772 + "y": 159 }, "id": 156, "options": { "displayMode": "basic", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -7602,9 +7893,11 @@ "fields": "", "values": false }, - "showUnfilled": true + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" }, - "pluginVersion": "9.2.5", + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -7633,11 +7926,13 @@ "mode": "palette-classic" }, "custom": { + "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -7646,6 +7941,7 @@ "tooltip": false, "viz": false }, + "insertNulls": false, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, @@ -7667,7 +7963,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7683,7 +7980,7 @@ "h": 12, "w": 12, "x": 12, - "y": 772 + "y": 159 }, "id": 155, "options": { @@ -7699,10 +7996,12 @@ "sortDesc": true }, "tooltip": { + "hideZeros": false, "mode": "single", "sort": "none" } }, + "pluginVersion": "11.5.2", "targets": [ { "datasource": { @@ -7782,7 +8081,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7797,7 +8097,7 @@ "h": 7, "w": 8, "x": 0, - "y": 84 + "y": 172 }, "id": 13, "options": { @@ -7880,7 +8180,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -7895,7 +8196,7 @@ "h": 7, "w": 8, "x": 8, - "y": 84 + "y": 172 }, "id": 18, "options": { @@ -8056,7 +8357,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8071,7 +8373,7 @@ "h": 7, "w": 8, "x": 16, - "y": 84 + "y": 172 }, "id": 42, "options": { @@ -8152,7 +8454,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8164,7 +8467,7 @@ "h": 8, "w": 12, "x": 0, - "y": 91 + "y": 179 }, "id": 103, "options": { @@ -8248,7 +8551,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8260,7 +8564,7 @@ "h": 8, "w": 12, "x": 12, - "y": 91 + "y": 179 }, "id": 102, "options": { @@ -8311,7 +8615,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8323,7 +8628,7 @@ "h": 8, "w": 24, "x": 0, - "y": 99 + "y": 187 }, "id": 101, "options": { @@ -8385,7 +8690,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8397,7 +8703,7 @@ "h": 8, "w": 12, "x": 0, - "y": 107 + "y": 195 }, "id": 105, "options": { @@ -8456,7 +8762,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] 
}, @@ -8468,7 +8775,7 @@ "h": 8, "w": 12, "x": 12, - "y": 107 + "y": 195 }, "id": 104, "options": { @@ -8574,7 +8881,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8590,7 +8898,7 @@ "h": 8, "w": 12, "x": 0, - "y": 116 + "y": 204 }, "id": 109, "options": { @@ -8742,6 +9050,6 @@ "timezone": "browser", "title": "Nim-Waku V2", "uid": "qrp_ZCTGz", - "version": 169, + "version": 178, "weekStart": "" } \ No newline at end of file diff --git a/waku/waku_store_sync/protocols_metrics.nim b/waku/waku_store_sync/protocols_metrics.nim index bb22f11c7..4195500e9 100644 --- a/waku/waku_store_sync/protocols_metrics.nim +++ b/waku/waku_store_sync/protocols_metrics.nim @@ -8,13 +8,13 @@ const declarePublicHistogram reconciliation_roundtrips, "the nubmer of roundtrips for each reconciliation", - buckets = [0.0, 1.0, 2.0, 3.0, 5.0, 10.0, Inf] + buckets = [1.0, 2.0, 3.0, 5.0, 8.0, 13.0, Inf] declarePublicHistogram reconciliation_differences, "the nubmer of differences for each reconciliation", - buckets = [0.0, 10.0, 50.0, 100.0, 500.0, 1000.0, 10000.0, Inf] + buckets = [0.0, 10.0, 50.0, 100.0, 500.0, 1000.0, 5000.0, Inf] -declarePublicSummary total_bytes_exchanged, +declarePublicCounter total_bytes_exchanged, "the number of bytes sent and received by the protocols", ["protocol", "direction"] declarePublicCounter total_transfer_messages_exchanged, diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index f7c13d42c..19dff363b 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -108,7 +108,7 @@ proc processRequest( await conn.close() return err("remote " & $conn.peerId & " connection read error: " & error.msg) - total_bytes_exchanged.observe(buffer.len, labelValues = [Reconciliation, Receiving]) + total_bytes_exchanged.inc(buffer.len, labelValues = [Reconciliation, Receiving]) let recvPayload = RangesData.deltaDecode(buffer).valueOr: await conn.close() @@ -148,9 +148,7 @@ proc processRequest( rawPayload = sendPayload.deltaEncode() - total_bytes_exchanged.observe( - rawPayload.len, labelValues = [Reconciliation, Sending] - ) + total_bytes_exchanged.inc(rawPayload.len, labelValues = [Reconciliation, Sending]) let writeRes = catch: await conn.writeLP(rawPayload) @@ -197,9 +195,7 @@ proc initiate( let sendPayload = initPayload.deltaEncode() - total_bytes_exchanged.observe( - sendPayload.len, labelValues = [Reconciliation, Sending] - ) + total_bytes_exchanged.inc(sendPayload.len, labelValues = [Reconciliation, Sending]) let writeRes = catch: await connection.writeLP(sendPayload) diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim index 81bed5ece..5a52cac9c 100644 --- a/waku/waku_store_sync/transfer.nim +++ b/waku/waku_store_sync/transfer.nim @@ -51,7 +51,7 @@ proc sendMessage( ): Future[Result[void, string]] {.async.} = let rawPayload = payload.encode().buffer - total_bytes_exchanged.observe(rawPayload.len, labelValues = [Transfer, Sending]) + total_bytes_exchanged.inc(rawPayload.len, labelValues = [Transfer, Sending]) let writeRes = catch: await conn.writeLP(rawPayload) @@ -144,7 +144,7 @@ proc initProtocolHandler(self: SyncTransfer) = # connection closed normally break - total_bytes_exchanged.observe(buffer.len, labelValues = [Transfer, Receiving]) + total_bytes_exchanged.inc(buffer.len, labelValues = [Transfer, Receiving]) let payload = WakuMessageAndTopic.decode(buffer).valueOr: error "decoding error", error = $error From 
95b665fa452c85555cc791d1a37546a474c83d59 Mon Sep 17 00:00:00 2001 From: Sasha <118575614+weboko@users.noreply.github.com> Date: Tue, 22 Apr 2025 19:04:52 +0200 Subject: [PATCH 28/48] chore: add js-waku link to readme for interop tests (#3383) --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 9b6dba4a4..057d0b622 100644 --- a/README.md +++ b/README.md @@ -133,6 +133,9 @@ Binary will be created as `.bin` under the `build` d make test/tests/common/test_enr_builder.nim ``` +### Testing against `js-waku` +Refer to [js-waku repo](https://github.com/waku-org/js-waku/tree/master/packages/tests) for instructions. + ## Formatting Nim files are expected to be formatted using the [`nph`](https://github.com/arnetheduck/nph) version present in `vendor/nph`. From 0304f063b83afddc1ddfba399c18ac31688fa776 Mon Sep 17 00:00:00 2001 From: Simon-Pierre Vivier Date: Wed, 23 Apr 2025 08:26:34 -0400 Subject: [PATCH 29/48] waku sync cached message metric (#3387) --- metrics/waku-fleet-dashboard.json | 379 ++++++++++++--------- waku/waku_store_sync/protocols_metrics.nim | 3 + waku/waku_store_sync/reconciliation.nim | 2 + 3 files changed, 221 insertions(+), 163 deletions(-) diff --git a/metrics/waku-fleet-dashboard.json b/metrics/waku-fleet-dashboard.json index 230fcc8d0..1d8be0b1b 100644 --- a/metrics/waku-fleet-dashboard.json +++ b/metrics/waku-fleet-dashboard.json @@ -450,7 +450,7 @@ "h": 9, "w": 12, "x": 0, - "y": 1312 + "y": 1074 }, "id": 81, "options": { @@ -547,7 +547,7 @@ "h": 9, "w": 12, "x": 12, - "y": 1312 + "y": 1074 }, "id": 82, "options": { @@ -647,7 +647,7 @@ "h": 9, "w": 12, "x": 0, - "y": 1321 + "y": 1083 }, "id": 78, "interval": "15s", @@ -751,7 +751,7 @@ "h": 9, "w": 12, "x": 12, - "y": 1321 + "y": 1083 }, "id": 79, "options": { @@ -852,7 +852,7 @@ "h": 10, "w": 12, "x": 0, - "y": 1330 + "y": 1092 }, "id": 124, "options": { @@ -957,7 +957,7 @@ "h": 10, "w": 12, "x": 12, - "y": 1330 + "y": 1092 }, "id": 126, "options": { @@ -1054,7 +1054,7 @@ "h": 10, "w": 12, "x": 0, - "y": 1340 + "y": 1102 }, "id": 169, "options": { @@ -1153,7 +1153,7 @@ "h": 10, "w": 12, "x": 12, - "y": 1340 + "y": 1102 }, "id": 170, "options": { @@ -1257,7 +1257,7 @@ "h": 9, "w": 12, "x": 0, - "y": 1350 + "y": 1112 }, "id": 11, "options": { @@ -1356,7 +1356,7 @@ "h": 9, "w": 12, "x": 12, - "y": 1350 + "y": 1112 }, "id": 54, "options": { @@ -1455,7 +1455,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1359 + "y": 1121 }, "id": 66, "options": { @@ -1552,7 +1552,7 @@ "h": 9, "w": 12, "x": 12, - "y": 1359 + "y": 1121 }, "id": 122, "options": { @@ -1682,7 +1682,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1368 + "y": 1130 }, "id": 68, "options": { @@ -1990,7 +1990,7 @@ "h": 6, "w": 12, "x": 0, - "y": 343 + "y": 670 }, "id": 60, "options": { @@ -2112,7 +2112,7 @@ "h": 6, "w": 12, "x": 12, - "y": 343 + "y": 670 }, "id": 8, "options": { @@ -2212,7 +2212,7 @@ "h": 6, "w": 12, "x": 0, - "y": 349 + "y": 676 }, "id": 2, "options": { @@ -2316,7 +2316,7 @@ "h": 6, "w": 12, "x": 12, - "y": 349 + "y": 676 }, "id": 83, "options": { @@ -2415,7 +2415,7 @@ "h": 6, "w": 12, "x": 0, - "y": 355 + "y": 682 }, "id": 3, "options": { @@ -2515,7 +2515,7 @@ "h": 6, "w": 12, "x": 12, - "y": 355 + "y": 682 }, "id": 9, "options": { @@ -2625,8 +2625,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2641,7 +2640,7 @@ "h": 6, "w": 12, "x": 0, - "y": 361 + "y": 688 }, "id": 6, "options": { @@ -2723,8 +2722,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - 
"value": null + "color": "green" }, { "color": "red", @@ -2739,7 +2737,7 @@ "h": 6, "w": 12, "x": 12, - "y": 361 + "y": 688 }, "id": 7, "options": { @@ -2848,8 +2846,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2865,7 +2862,7 @@ "h": 8, "w": 12, "x": 0, - "y": 367 + "y": 694 }, "id": 44, "options": { @@ -2971,8 +2968,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2988,7 +2984,7 @@ "h": 6, "w": 12, "x": 12, - "y": 367 + "y": 694 }, "id": 10, "options": { @@ -3071,8 +3067,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3088,7 +3083,7 @@ "h": 8, "w": 12, "x": 12, - "y": 373 + "y": 700 }, "id": 64, "options": { @@ -3175,8 +3170,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3191,7 +3185,7 @@ "h": 6, "w": 12, "x": 0, - "y": 375 + "y": 702 }, "id": 4, "options": { @@ -3273,8 +3267,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -3289,7 +3282,7 @@ "h": 6, "w": 12, "x": 12, - "y": 381 + "y": 708 }, "id": 5, "options": { @@ -3694,7 +3687,7 @@ "h": 8, "w": 12, "x": 0, - "y": 306 + "y": 27 }, "id": 119, "options": { @@ -3791,7 +3784,7 @@ "h": 8, "w": 12, "x": 12, - "y": 306 + "y": 27 }, "id": 121, "options": { @@ -3888,7 +3881,7 @@ "h": 8, "w": 8, "x": 0, - "y": 314 + "y": 35 }, "id": 113, "options": { @@ -3986,7 +3979,7 @@ "h": 8, "w": 8, "x": 8, - "y": 314 + "y": 35 }, "id": 115, "options": { @@ -4109,7 +4102,7 @@ "h": 6, "w": 12, "x": 0, - "y": 4 + "y": 355 }, "id": 36, "options": { @@ -4206,7 +4199,7 @@ "h": 6, "w": 12, "x": 12, - "y": 4 + "y": 355 }, "id": 38, "options": { @@ -4318,7 +4311,7 @@ "h": 6, "w": 12, "x": 0, - "y": 212 + "y": 361 }, "id": 62, "options": { @@ -4417,7 +4410,7 @@ "h": 6, "w": 12, "x": 12, - "y": 212 + "y": 361 }, "id": 40, "options": { @@ -4531,7 +4524,7 @@ "h": 11, "w": 12, "x": 0, - "y": 218 + "y": 367 }, "id": 144, "options": { @@ -4636,7 +4629,7 @@ "h": 11, "w": 12, "x": 12, - "y": 218 + "y": 367 }, "id": 145, "options": { @@ -4729,8 +4722,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4742,7 +4734,7 @@ "h": 8, "w": 12, "x": 0, - "y": 229 + "y": 378 }, "id": 146, "options": { @@ -4834,8 +4826,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4847,7 +4838,7 @@ "h": 8, "w": 12, "x": 12, - "y": 229 + "y": 378 }, "id": 148, "options": { @@ -4939,8 +4930,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -4952,7 +4942,7 @@ "h": 8, "w": 12, "x": 0, - "y": 237 + "y": 386 }, "id": 158, "options": { @@ -5043,8 +5033,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5056,7 +5045,7 @@ "h": 8, "w": 12, "x": 12, - "y": 237 + "y": 386 }, "id": 157, "options": { @@ -5146,8 +5135,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5159,7 +5147,7 @@ "h": 8, "w": 12, "x": 0, - "y": 245 + "y": 394 }, "id": 149, "options": { @@ -5245,8 +5233,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -5283,7 +5270,7 @@ "h": 8, "w": 12, "x": 12, - "y": 245 + "y": 394 }, "id": 147, "options": { @@ -5345,7 +5332,7 @@ "h": 7, "w": 12, 
"x": 0, - "y": 253 + "y": 402 }, "id": 77, "maxDataPoints": 60, @@ -5444,7 +5431,7 @@ "h": 7, "w": 12, "x": 12, - "y": 253 + "y": 402 }, "id": 75, "maxDataPoints": 60, @@ -5590,8 +5577,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5607,7 +5593,7 @@ "h": 13, "w": 12, "x": 0, - "y": 260 + "y": 409 }, "id": 142, "options": { @@ -5692,8 +5678,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5709,7 +5694,7 @@ "h": 13, "w": 12, "x": 12, - "y": 260 + "y": 409 }, "id": 130, "options": { @@ -5794,8 +5779,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5811,7 +5795,7 @@ "h": 13, "w": 12, "x": 0, - "y": 273 + "y": 422 }, "id": 132, "options": { @@ -5898,8 +5882,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5915,7 +5898,7 @@ "h": 13, "w": 12, "x": 12, - "y": 273 + "y": 422 }, "id": 143, "options": { @@ -6002,8 +5985,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6019,7 +6001,7 @@ "h": 13, "w": 12, "x": 0, - "y": 286 + "y": 435 }, "id": 128, "options": { @@ -6106,8 +6088,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6123,7 +6104,7 @@ "h": 13, "w": 12, "x": 12, - "y": 286 + "y": 435 }, "id": 141, "options": { @@ -6362,7 +6343,7 @@ "uid": "P6693426190CB2316" }, "disableTextWrap": false, - "editorMode": "builder", + "editorMode": "code", "expr": "sum by(direction) (total_transfer_messages_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", "fullMetaSearch": false, "includeNullMetadata": true, @@ -6527,13 +6508,107 @@ "title": "Distribution of differences per reconciliation.", "type": "bargauge" }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "The total number of messages cached by nodes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 172, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "disableTextWrap": false, + "editorMode": "code", + "expr": "total_messages_cached_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + 
"useBackend": false + } + ], + "title": "Messages Cached", + "type": "timeseries" + }, { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 21 + "y": 29 }, "id": 87, "panels": [ @@ -6585,8 +6660,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6601,7 +6675,7 @@ "h": 8, "w": 8, "x": 0, - "y": 51 + "y": 2595 }, "id": 93, "options": { @@ -6685,8 +6759,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6701,7 +6774,7 @@ "h": 8, "w": 8, "x": 8, - "y": 51 + "y": 2595 }, "id": 89, "options": { @@ -6782,8 +6855,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6798,7 +6870,7 @@ "h": 8, "w": 8, "x": 16, - "y": 51 + "y": 2595 }, "id": 91, "options": { @@ -6855,7 +6927,7 @@ "h": 8, "w": 12, "x": 0, - "y": 59 + "y": 2603 }, "id": 95, "options": { @@ -6937,7 +7009,7 @@ "h": 8, "w": 12, "x": 12, - "y": 59 + "y": 2603 }, "id": 97, "options": { @@ -7043,8 +7115,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7060,7 +7131,7 @@ "h": 13, "w": 12, "x": 0, - "y": 67 + "y": 2611 }, "id": 134, "options": { @@ -7147,8 +7218,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7164,7 +7234,7 @@ "h": 13, "w": 12, "x": 12, - "y": 67 + "y": 2611 }, "id": 136, "options": { @@ -7210,7 +7280,7 @@ "h": 1, "w": 24, "x": 0, - "y": 22 + "y": 30 }, "id": 28, "panels": [ @@ -7262,8 +7332,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7278,7 +7347,7 @@ "h": 8, "w": 12, "x": 0, - "y": 126 + "y": 2670 }, "id": 30, "options": { @@ -7359,8 +7428,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7375,7 +7443,7 @@ "h": 8, "w": 12, "x": 12, - "y": 126 + "y": 2670 }, "id": 32, "options": { @@ -7457,8 +7525,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7474,7 +7541,7 @@ "h": 12, "w": 12, "x": 0, - "y": 134 + "y": 2678 }, "id": 138, "options": { @@ -7561,8 +7628,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7578,7 +7644,7 @@ "h": 12, "w": 12, "x": 12, - "y": 134 + "y": 2678 }, "id": 140, "options": { @@ -7625,7 +7691,7 @@ "h": 1, "w": 24, "x": 0, - "y": 23 + "y": 31 }, "id": 151, "panels": [ @@ -7678,8 +7744,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7695,7 +7760,7 @@ "h": 12, "w": 12, "x": 0, - "y": 147 + "y": 2691 }, "id": 153, "options": { @@ -7782,8 +7847,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7799,7 +7863,7 @@ "h": 12, "w": 12, "x": 12, - "y": 147 + "y": 2691 }, "id": 154, "options": { @@ -7853,8 +7917,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7870,7 +7933,7 @@ "h": 12, "w": 12, "x": 0, - "y": 159 + "y": 2703 }, "id": 156, "options": { @@ -7963,8 +8026,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -7980,7 +8042,7 @@ "h": 12, "w": 12, "x": 12, - "y": 159 + "y": 2703 }, "id": 155, "options": { @@ -8028,7 +8090,7 @@ 
"h": 1, "w": 24, "x": 0, - "y": 24 + "y": 32 }, "id": 15, "panels": [ @@ -8081,8 +8143,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -8097,7 +8158,7 @@ "h": 7, "w": 8, "x": 0, - "y": 172 + "y": 2716 }, "id": 13, "options": { @@ -8180,8 +8241,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -8196,7 +8256,7 @@ "h": 7, "w": 8, "x": 8, - "y": 172 + "y": 2716 }, "id": 18, "options": { @@ -8357,8 +8417,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -8373,7 +8432,7 @@ "h": 7, "w": 8, "x": 16, - "y": 172 + "y": 2716 }, "id": 42, "options": { @@ -8454,8 +8513,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -8467,7 +8525,7 @@ "h": 8, "w": 12, "x": 0, - "y": 179 + "y": 2723 }, "id": 103, "options": { @@ -8551,8 +8609,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -8564,7 +8621,7 @@ "h": 8, "w": 12, "x": 12, - "y": 179 + "y": 2723 }, "id": 102, "options": { @@ -8615,8 +8672,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -8628,7 +8684,7 @@ "h": 8, "w": 24, "x": 0, - "y": 187 + "y": 2731 }, "id": 101, "options": { @@ -8690,8 +8746,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -8703,7 +8758,7 @@ "h": 8, "w": 12, "x": 0, - "y": 195 + "y": 2739 }, "id": 105, "options": { @@ -8762,8 +8817,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -8775,7 +8829,7 @@ "h": 8, "w": 12, "x": 12, - "y": 195 + "y": 2739 }, "id": 104, "options": { @@ -8829,7 +8883,7 @@ "h": 1, "w": 24, "x": 0, - "y": 25 + "y": 33 }, "id": 107, "panels": [ @@ -8881,8 +8935,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -8898,7 +8951,7 @@ "h": 8, "w": 12, "x": 0, - "y": 204 + "y": 2748 }, "id": 109, "options": { @@ -9050,6 +9103,6 @@ "timezone": "browser", "title": "Nim-Waku V2", "uid": "qrp_ZCTGz", - "version": 178, + "version": 180, "weekStart": "" } \ No newline at end of file diff --git a/waku/waku_store_sync/protocols_metrics.nim b/waku/waku_store_sync/protocols_metrics.nim index 4195500e9..53595f931 100644 --- a/waku/waku_store_sync/protocols_metrics.nim +++ b/waku/waku_store_sync/protocols_metrics.nim @@ -19,3 +19,6 @@ declarePublicCounter total_bytes_exchanged, declarePublicCounter total_transfer_messages_exchanged, "the number of messages sent and received by the transfer protocol", ["direction"] + +declarePublicGauge total_messages_cached, + "the number of messages cached by the node after prunning" diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index 19dff363b..c08a9e434 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -362,6 +362,8 @@ proc periodicPrune(self: SyncReconciliation) {.async.} = let count = self.storage.prune(time) + total_messages_cached.set(self.storage.length()) + debug "periodic prune done", elements_pruned = count proc idsReceiverLoop(self: SyncReconciliation) {.async.} = From ab8a30d3d61eb6bc83f69b759b0d5fc0b0fe5da2 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Thu, 24 Apr 2025 08:36:02 +0200 Subject: [PATCH 
30/48] chore: extended /admin/v1 RESP API with different option to look at current connected/relay/mesh state of the node (#3382) * Extended /admin/v1 RESP API with different option to look at current connected/relay/mesh state of the node * Added score information for peer info retrievals --- tests/wakunode_rest/test_rest_admin.nim | 104 ++++++- waku/node/peer_manager/peer_manager.nim | 2 +- waku/waku_api/rest/admin/client.nim | 38 +++ waku/waku_api/rest/admin/handlers.nim | 351 +++++++++++++++++------- waku/waku_api/rest/admin/types.nim | 184 +++++++++---- waku/waku_api/rest/serdes.nim | 14 +- waku/waku_core/peers.nim | 31 ++- waku/waku_enr/sharding.nim | 2 +- waku/waku_relay/protocol.nim | 49 +++- 9 files changed, 589 insertions(+), 186 deletions(-) diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index 99ddacd8c..bdab61a75 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -1,11 +1,11 @@ {.used.} import - std/[sequtils, net], - stew/shims/net, + std/[sequtils, strformat, net], testutils/unittests, presto, presto/client as presto_client, + presto /../ tests/helpers, libp2p/crypto/crypto import @@ -43,10 +43,11 @@ suite "Waku v2 Rest API - Admin": node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604)) await allFutures(node1.start(), node2.start(), node3.start()) + let shards = @[RelayShard(clusterId: 1, shardId: 0)] await allFutures( - node1.mountRelay(), - node2.mountRelay(), - node3.mountRelay(), + node1.mountRelay(shards = shards), + node2.mountRelay(shards = shards), + node3.mountRelay(shards = shards), node3.mountPeerExchange(), ) @@ -203,3 +204,96 @@ suite "Waku v2 Rest API - Admin": getRes.data.anyIt(it.origin == Discv5) # Check peer 3 getRes.data.anyIt(it.origin == PeerExchange) + + asyncTest "get peers by id": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getPeerById($peerInfo2.peerId) + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.protocols.find(WakuRelayCodec) >= 0 + getRes.data.multiaddr == constructMultiaddrStr(peerInfo2) + + ## nim-presto library's RestClient does not support text error case decode if + ## the RestResponse expects a JSON with complex type + # let getRes2 = await client.getPeerById("bad peer id") + let getRes2 = await httpClient( + restServer.httpServer.address, MethodGet, "/admin/v1/peer/bad+peer+id", "" + ) + check: + getRes2.status == 400 + getRes2.data == "Invalid argument:peerid: incorrect PeerId string" + + asyncTest "get connected peers": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getConnectedPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 2 + # Check peer 2 + getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2)) + # Check peer 3 + getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3)) + + # Seems shard info is not available in the peer manager + # let getRes2 = await client.getConnectedPeersByShard(0) + # check: + # getRes2.status == 200 + # $getRes2.contentType == $MIMETYPE_JSON + # getRes2.data.len() == 2 + + let getRes3 = await 
client.getConnectedPeersByShard(99) + check: + getRes3.status == 200 + $getRes3.contentType == $MIMETYPE_JSON + getRes3.data.len() == 0 + + asyncTest "get relay peers": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getConnectedRelayPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + require getRes.data.len() == 1 # Check peer 2 + check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2)) + # Check peer 2 + check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3)) + # Check peer 3 + + # Todo: investigate why the test setup missing remote peer's shard info + # let getRes2 = await client.getConnectedRelayPeersByShard(0) + # check: + # getRes2.status == 200 + # $getRes2.contentType == $MIMETYPE_JSON + # getRes2.data.peers.len() == 2 + + let getRes3 = await client.getConnectedRelayPeersByShard(99) + check: + getRes3.status == 200 + $getRes3.contentType == $MIMETYPE_JSON + getRes3.data.peers.len() == 0 diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 39baeea3e..602718d5d 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -154,7 +154,7 @@ proc addPeer*( pm.storage.insertOrReplace(remotePeerInfo) -proc getPeer(pm: PeerManager, peerId: PeerId): RemotePeerInfo = +proc getPeer*(pm: PeerManager, peerId: PeerId): RemotePeerInfo = return pm.switch.peerStore.getPeer(peerId) proc loadFromStorage(pm: PeerManager) {.gcsafe.} = diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim index ebcebe965..4b46ca136 100644 --- a/waku/waku_api/rest/admin/client.nim +++ b/waku/waku_api/rest/admin/client.nim @@ -22,6 +22,44 @@ proc postPeers*( rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodPost .} +proc getPeerById*( + peerId: string +): RestResponse[WakuPeer] {. + rest, endpoint: "/admin/v1/peer/{peerId}", meth: HttpMethod.MethodGet +.} + +proc getConnectedPeers*(): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers/connected", meth: HttpMethod.MethodGet +.} + +proc getConnectedPeersByShard*( + shardId: uint16 +): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers/connected/on/{shardId}", meth: HttpMethod.MethodGet +.} + +proc getConnectedRelayPeers*(): RestResponse[PeersOfShards] {. + rest, endpoint: "/admin/v1/peers/connected/relay", meth: HttpMethod.MethodGet +.} + +proc getConnectedRelayPeersByShard*( + shardId: uint16 +): RestResponse[PeersOfShard] {. + rest, + endpoint: "/admin/v1/peers/connected/relay/on/{shardId}", + meth: HttpMethod.MethodGet +.} + +proc getMeshPeers*(): RestResponse[PeersOfShards] {. + rest, endpoint: "/admin/v1/peers/mesh", meth: HttpMethod.MethodGet +.} + +proc getMeshPeersByShard*( + shardId: uint16 +): RestResponse[PeersOfShard] {. + rest, endpoint: "/admin/v1/peers/mesh/on/{shardId}", meth: HttpMethod.MethodGet +.} + proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {. 
rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet .} diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index f2eb4a8ba..ada60e870 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -1,22 +1,26 @@ {.push raises: [].} import - std/[strformat, sequtils, tables], + std/[sets, strformat, sequtils, tables], chronicles, json_serialization, presto/route, - libp2p/[peerinfo, switch] + libp2p/[peerinfo, switch, peerid, protocols/pubsub/pubsubpeer] import - ../../../waku_core, - ../../../waku_store_legacy/common, - ../../../waku_store/common, - ../../../waku_filter_v2, - ../../../waku_lightpush_legacy/common, - ../../../waku_relay, - ../../../waku_peer_exchange, - ../../../waku_node, - ../../../node/peer_manager, + waku/[ + waku_core, + waku_core/topics/pubsub_topic, + waku_store_legacy/common, + waku_store/common, + waku_filter_v2, + waku_lightpush_legacy/common, + waku_relay, + waku_peer_exchange, + waku_node, + node/peer_manager, + waku_enr/sharding, + ], ../responses, ../serdes, ../rest_serdes, @@ -27,103 +31,260 @@ export types logScope: topics = "waku node rest admin api" -const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" +const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers +const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}" + +const ROUTE_ADMIN_V1_CONNECTED_PEERS* = "/admin/v1/peers/connected" +const ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD* = + "/admin/v1/peers/connected/on/{shardId}" +const ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS* = "/admin/v1/peers/connected/relay" +const ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS_ON_SHARD* = + "/admin/v1/peers/connected/relay/on/{shardId}" +const ROUTE_ADMIN_V1_MESH_PEERS* = "/admin/v1/peers/mesh" +const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}" + const ROUTE_ADMIN_V1_FILTER_SUBS* = "/admin/v1/filter/subscriptions" type PeerProtocolTuple = - tuple[multiaddr: string, protocol: string, connected: bool, origin: PeerOrigin] + tuple[ + multiaddr: string, + protocol: string, + shards: seq[uint16], + connected: Connectedness, + agent: string, + origin: PeerOrigin, + ] proc tuplesToWakuPeers(peers: var WakuPeers, peersTup: seq[PeerProtocolTuple]) = for peer in peersTup: - peers.add(peer.multiaddr, peer.protocol, peer.connected, peer.origin) + peers.add( + peer.multiaddr, peer.protocol, peer.shards, peer.connected, peer.agent, + peer.origin, + ) + +proc populateAdminPeerInfo(peers: var WakuPeers, node: WakuNode, codec: string) = + let peersForCodec = node.peerManager.switch.peerStore.peers(codec).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: codec, + shards: it.getShards(), + connected: it.connectedness, + agent: it.agent, + origin: it.origin, + ) + ) + tuplesToWakuPeers(peers, peersForCodec) + +proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPeers = + var peers: WakuPeers = @[] + + for codec in codecs: + populateAdminPeerInfo(peers, node, codec) + + return peers proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: - var peers: WakuPeers = @[] - - let relayPeers = node.peerManager.switch.peerStore.peers(WakuRelayCodec).mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuRelayCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, relayPeers) - - let filterV2Peers = 
node.peerManager.switch.peerStore - .peers(WakuFilterSubscribeCodec) - .mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuFilterSubscribeCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, filterV2Peers) - - let storePeers = node.peerManager.switch.peerStore.peers(WakuStoreCodec).mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuStoreCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, storePeers) - - let legacyStorePeers = node.peerManager.switch.peerStore - .peers(WakuLegacyStoreCodec) - .mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuLegacyStoreCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, legacyStorePeers) - - let legacyLightpushPeers = node.peerManager.switch.peerStore - .peers(WakuLegacyLightPushCodec) - .mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuLegacyLightPushCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, legacyLightpushPeers) - - let lightpushPeers = node.peerManager.switch.peerStore - .peers(WakuLightPushCodec) - .mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuLightPushCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, lightpushPeers) - - let pxPeers = node.peerManager.switch.peerStore.peers(WakuPeerExchangeCodec).mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: WakuPeerExchangeCodec, - connected: it.connectedness == Connectedness.Connected, - origin: it.origin, - ) - ) - tuplesToWakuPeers(peers, pxPeers) + let peers = populateAdminPeerInfoForCodecs( + node, + @[ + WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, + WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, + WakuReconciliationCodec, + ], + ) let resp = RestApiResponse.jsonResponse(peers, status = Http200) if resp.isErr(): - error "An error ocurred while building the json respose: ", error = resp.error + error "An error occurred while building the json response: ", error = resp.error return RestApiResponse.internalServerError( - fmt("An error ocurred while building the json respose: {resp.error}") + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, ROUTE_ADMIN_V1_SINGLE_PEER) do( + peerId: string + ) -> RestApiResponse: + let peerIdString = peerId.valueOr: + return RestApiResponse.badRequest("Invalid argument:" & $error) + + let peerIdVal: PeerId = PeerId.init(peerIdString).valueOr: + return RestApiResponse.badRequest("Invalid argument:" & $error) + + if node.peerManager.switch.peerStore.peerExists(peerIdVal): + let peerInfo = node.peerManager.switch.peerStore.getPeer(peerIdVal) + let peer = WakuPeer.init(peerInfo) + let resp = RestApiResponse.jsonResponse(peer, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + else: + return RestApiResponse.notFound(fmt("Peer with ID {peerId} not found")) + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS) do() -> RestApiResponse: + let allPeers = populateAdminPeerInfoForCodecs( + node, 
+ @[ + WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, + WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, + WakuReconciliationCodec, + ], + ) + + let connectedPeers = allPeers.filterIt(it.connected == Connectedness.Connected) + + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + let allPeers = populateAdminPeerInfoForCodecs( + node, + @[ + WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, + WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, + WakuReconciliationCodec, + ], + ) + + let connectedPeers = allPeers.filterIt( + it.connected == Connectedness.Connected and it.shards.contains(shard) + ) + + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS) do() -> RestApiResponse: + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + var relayPeers: PeersOfShards = @[] + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = error, topic = topic + continue + let pubsubPeers = + node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) + relayPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + + let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + let topic = + toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard)) + let pubsubPeers = + node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) + let relayPeer = PeersOfShard( + shard: shard, peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)) + ) + + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, 
ROUTE_ADMIN_V1_MESH_PEERS) do() -> RestApiResponse: + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + var relayPeers: PeersOfShards = @[] + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = error, topic = topic + continue + let peers = + node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) + relayPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + + let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") + ) + + return resp.get() + + router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + let topic = + toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard)) + let peers = + node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) + let relayPeer = PeersOfShard( + shard: shard, peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)) + ) + + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200) + if resp.isErr(): + error "An error occurred while building the json response: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {resp.error}") ) return resp.get() diff --git a/waku/waku_api/rest/admin/types.nim b/waku/waku_api/rest/admin/types.nim index bb7dd2b0c..0c0786e3d 100644 --- a/waku/waku_api/rest/admin/types.nim +++ b/waku/waku_api/rest/admin/types.nim @@ -4,22 +4,29 @@ import chronicles, json_serialization, json_serialization/std/options, - json_serialization/lexer -import ../serdes, ../../../waku_core + json_serialization/lexer, + results, + libp2p/protocols/pubsub/pubsubpeer +import waku/[waku_core, node/peer_manager], ../serdes #### Types - -type ProtocolState* = object - protocol*: string - connected*: bool - type WakuPeer* = object multiaddr*: string - protocols*: seq[ProtocolState] + protocols*: seq[string] + shards*: seq[uint16] + connected*: Connectedness + agent*: string origin*: PeerOrigin + score*: Option[float64] type WakuPeers* = seq[WakuPeer] +type PeersOfShard* = object + shard*: uint16 + peers*: WakuPeers + +type PeersOfShards* = seq[PeersOfShard] + type FilterTopic* = object pubsubTopic*: string contentTopic*: string @@ -29,22 +36,25 @@ type FilterSubscription* = object filterCriteria*: seq[FilterTopic] #### Serialization and deserialization - -proc writeValue*( - writer: var JsonWriter[RestJson], value: ProtocolState -) {.raises: [IOError].} = - writer.beginRecord() - writer.writeField("protocol", value.protocol) - writer.writeField("connected", value.connected) - writer.endRecord() - proc writeValue*( writer: var JsonWriter[RestJson], value: WakuPeer ) {.raises: [IOError].} = writer.beginRecord() writer.writeField("multiaddr", value.multiaddr) writer.writeField("protocols", value.protocols) + writer.writeField("shards", value.shards) + 
writer.writeField("connected", value.connected) + writer.writeField("agent", value.agent) writer.writeField("origin", value.origin) + writer.writeField("score", value.score) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PeersOfShard +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("shard", value.shard) + writer.writeField("peers", value.peers) writer.endRecord() proc writeValue*( @@ -63,43 +73,17 @@ proc writeValue*( writer.writeField("filterCriteria", value.filterCriteria) writer.endRecord() -proc readValue*( - reader: var JsonReader[RestJson], value: var ProtocolState -) {.gcsafe, raises: [SerializationError, IOError].} = - var - protocol: Option[string] - connected: Option[bool] - - for fieldName in readObjectFields(reader): - case fieldName - of "protocol": - if protocol.isSome(): - reader.raiseUnexpectedField("Multiple `protocol` fields found", "ProtocolState") - protocol = some(reader.readValue(string)) - of "connected": - if connected.isSome(): - reader.raiseUnexpectedField( - "Multiple `connected` fields found", "ProtocolState" - ) - connected = some(reader.readValue(bool)) - else: - unrecognizedFieldWarning(value) - - if connected.isNone(): - reader.raiseUnexpectedValue("Field `connected` is missing") - - if protocol.isNone(): - reader.raiseUnexpectedValue("Field `protocol` is missing") - - value = ProtocolState(protocol: protocol.get(), connected: connected.get()) - proc readValue*( reader: var JsonReader[RestJson], value: var WakuPeer ) {.gcsafe, raises: [SerializationError, IOError].} = var multiaddr: Option[string] - protocols: Option[seq[ProtocolState]] + protocols: Option[seq[string]] + shards: Option[seq[uint16]] + connected: Option[Connectedness] + agent: Option[string] origin: Option[PeerOrigin] + score: Option[float64] for fieldName in readObjectFields(reader): case fieldName @@ -110,11 +94,27 @@ proc readValue*( of "protocols": if protocols.isSome(): reader.raiseUnexpectedField("Multiple `protocols` fields found", "WakuPeer") - protocols = some(reader.readValue(seq[ProtocolState])) + protocols = some(reader.readValue(seq[string])) + of "shards": + if shards.isSome(): + reader.raiseUnexpectedField("Multiple `shards` fields found", "WakuPeer") + shards = some(reader.readValue(seq[uint16])) + of "connected": + if connected.isSome(): + reader.raiseUnexpectedField("Multiple `connected` fields found", "WakuPeer") + connected = some(reader.readValue(Connectedness)) + of "agent": + if agent.isSome(): + reader.raiseUnexpectedField("Multiple `agent` fields found", "WakuPeer") + agent = some(reader.readValue(string)) of "origin": if origin.isSome(): reader.raiseUnexpectedField("Multiple `origin` fields found", "WakuPeer") origin = some(reader.readValue(PeerOrigin)) + of "score": + if score.isSome(): + reader.raiseUnexpectedField("Multiple `score` fields found", "WakuPeer") + score = some(reader.readValue(float64)) else: unrecognizedFieldWarning(value) @@ -124,13 +124,56 @@ proc readValue*( if protocols.isNone(): reader.raiseUnexpectedValue("Field `protocols` are missing") + if shards.isNone(): + reader.raiseUnexpectedValue("Field `shards` is missing") + + if connected.isNone(): + reader.raiseUnexpectedValue("Field `connected` is missing") + + if agent.isNone(): + reader.raiseUnexpectedValue("Field `agent` is missing") + if origin.isNone(): reader.raiseUnexpectedValue("Field `origin` is missing") value = WakuPeer( - multiaddr: multiaddr.get(), protocols: protocols.get(), origin: origin.get() + multiaddr: multiaddr.get(), + 
protocols: protocols.get(), + shards: shards.get(), + connected: connected.get(), + agent: agent.get(), + origin: origin.get(), + score: score, ) +proc readValue*( + reader: var JsonReader[RestJson], value: var PeersOfShard +) {.gcsafe, raises: [SerializationError, IOError].} = + var + shard: Option[uint16] + peers: Option[WakuPeers] + + for fieldName in readObjectFields(reader): + case fieldName + of "shard": + if shard.isSome(): + reader.raiseUnexpectedField("Multiple `shard` fields found", "PeersOfShard") + shard = some(reader.readValue(uint16)) + of "peers": + if peers.isSome(): + reader.raiseUnexpectedField("Multiple `peers` fields found", "PeersOfShard") + peers = some(reader.readValue(WakuPeers)) + else: + unrecognizedFieldWarning(value) + + if shard.isNone(): + reader.raiseUnexpectedValue("Field `shard` is missing") + + if peers.isNone(): + reader.raiseUnexpectedValue("Field `peers` are missing") + + value = PeersOfShard(shard: shard.get(), peers: peers.get()) + proc readValue*( reader: var JsonReader[RestJson], value: var FilterTopic ) {.gcsafe, raises: [SerializationError, IOError].} = @@ -195,26 +238,47 @@ proc readValue*( value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get()) -## Utility for populating WakuPeers and ProtocolState -func `==`*(a, b: ProtocolState): bool {.inline.} = - return a.protocol == b.protocol - -func `==`*(a: ProtocolState, b: string): bool {.inline.} = - return a.protocol == b - func `==`*(a, b: WakuPeer): bool {.inline.} = return a.multiaddr == b.multiaddr +proc init*(T: type WakuPeer, peerInfo: RemotePeerInfo): WakuPeer = + result = WakuPeer( + multiaddr: constructMultiaddrStr(peerInfo), + protocols: peerInfo.protocols, + shards: peerInfo.getShards(), + connected: peerInfo.connectedness, + agent: peerInfo.agent, + origin: peerInfo.origin, + score: none(float64), + ) + +proc init*(T: type WakuPeer, pubsubPeer: PubSubPeer, pm: PeerManager): WakuPeer = + let peerInfo = pm.getPeer(pubsubPeer.peerId) + result = WakuPeer( + multiaddr: constructMultiaddrStr(peerInfo), + protocols: peerInfo.protocols, + shards: peerInfo.getShards(), + connected: peerInfo.connectedness, + agent: peerInfo.agent, + origin: peerInfo.origin, + score: some(pubsubPeer.score), + ) + proc add*( peers: var WakuPeers, multiaddr: string, protocol: string, - connected: bool, + shards: seq[uint16], + connected: Connectedness, + agent: string, origin: PeerOrigin, ) = var peer: WakuPeer = WakuPeer( multiaddr: multiaddr, - protocols: @[ProtocolState(protocol: protocol, connected: connected)], + protocols: @[protocol], + shards: shards, + connected: connected, + agent: agent, origin: origin, ) let idx = peers.find(peer) @@ -222,4 +286,4 @@ proc add*( if idx < 0: peers.add(peer) else: - peers[idx].protocols.add(ProtocolState(protocol: protocol, connected: connected)) + peers[idx].protocols.add(protocol) diff --git a/waku/waku_api/rest/serdes.nim b/waku/waku_api/rest/serdes.nim index eb6bc1545..d54d17e78 100644 --- a/waku/waku_api/rest/serdes.nim +++ b/waku/waku_api/rest/serdes.nim @@ -1,9 +1,9 @@ {.push raises: [].} import - std/typetraits, + std/[typetraits, parseutils], results, - stew/byteutils, + stew/[byteutils, base10], chronicles, serialization, json_serialization, @@ -100,3 +100,13 @@ proc encodeString*(value: string): RestResult[string] = proc decodeString*(t: typedesc[string], value: string): RestResult[string] = ok(value) + +proc encodeString*(value: SomeUnsignedInt): RestResult[string] = + ok(Base10.toString(value)) + +proc decodeString*(T: 
typedesc[SomeUnsignedInt], value: string): RestResult[T] = + let v = Base10.decode(T, value) + if v.isErr(): + return err(v.error()) + else: + return ok(v.get()) diff --git a/waku/waku_core/peers.nim b/waku/waku_core/peers.nim index fdd3d7948..883f266bd 100644 --- a/waku/waku_core/peers.nim +++ b/waku/waku_core/peers.nim @@ -18,7 +18,7 @@ import libp2p/routing_record, regex, json_serialization -import ../waku_enr/capabilities +import ../waku_enr type Connectedness* = enum @@ -231,7 +231,7 @@ proc parsePeerInfo*(maddrs: varargs[string]): Result[RemotePeerInfo, string] = parsePeerInfo(multiAddresses) -func getTransportProtocol(typedR: TypedRecord): Option[IpTransportProtocol] = +func getTransportProtocol(typedR: enr.TypedRecord): Option[IpTransportProtocol] = if typedR.tcp6.isSome() or typedR.tcp.isSome(): return some(IpTransportProtocol.tcpProtocol) @@ -255,9 +255,9 @@ proc parseUrlPeerAddr*( return ok(some(parsedPeerInfo.value)) -proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] = +proc toRemotePeerInfo*(enrRec: enr.Record): Result[RemotePeerInfo, cstring] = ## Converts an ENR to dialable RemotePeerInfo - let typedR = TypedRecord.fromRecord(enr) + let typedR = enr.TypedRecord.fromRecord(enrRec) if not typedR.secp256k1.isSome(): return err("enr: no secp256k1 key in record") @@ -303,7 +303,7 @@ proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] = return err("enr: no addresses in record") let protocolsRes = catch: - enr.getCapabilitiesCodecs() + enrRec.getCapabilitiesCodecs() var protocols: seq[string] if not protocolsRes.isErr(): @@ -312,7 +312,7 @@ proc toRemotePeerInfo*(enr: enr.Record): Result[RemotePeerInfo, cstring] = error "Could not retrieve supported protocols from enr", peerId = peerId, msg = protocolsRes.error.msg - return ok(RemotePeerInfo.init(peerId, addrs, some(enr), protocols)) + return ok(RemotePeerInfo.init(peerId, addrs, some(enrRec), protocols)) converter toRemotePeerInfo*(peerRecord: PeerRecord): RemotePeerInfo = ## Converts peer records to dialable RemotePeerInfo @@ -350,8 +350,8 @@ func hasUdpPort*(peer: RemotePeerInfo): bool = return false let - enr = peer.enr.get() - typedEnr = TypedRecord.fromRecord(enr) + enrRec = peer.enr.get() + typedEnr = enr.TypedRecord.fromRecord(enrRec) typedEnr.udp.isSome() or typedEnr.udp6.isSome() @@ -361,3 +361,18 @@ proc getAgent*(peer: RemotePeerInfo): string = return "unknown" return peer.agent + +proc getShards*(peer: RemotePeerInfo): seq[uint16] = + if peer.enr.isNone(): + return @[] + + let enrRec = peer.enr.get() + let typedRecord = enrRec.toTyped().valueOr: + trace "invalid ENR record", error = error + return @[] + + let shards = typedRecord.relaySharding() + if shards.isSome(): + return shards.get().shardIds + + return @[] diff --git a/waku/waku_enr/sharding.nim b/waku/waku_enr/sharding.nim index 88dc4e200..4ee77bf96 100644 --- a/waku/waku_enr/sharding.nim +++ b/waku/waku_enr/sharding.nim @@ -8,7 +8,7 @@ import eth/keys, libp2p/[multiaddress, multicodec], libp2p/crypto/crypto -import ../common/enr, ../waku_core +import ../common/enr, ../waku_core/topics/pubsub_topic logScope: topics = "waku enr sharding" diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 1698fac70..4eeaf4607 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -323,31 +323,42 @@ proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} = proc getDHigh*(T: type WakuRelay): int = return GossipsubParameters.dHigh -proc getPeersInMesh*( +proc 
getPubSubPeersInMesh*( w: WakuRelay, pubsubTopic: PubsubTopic -): Result[seq[PeerId], string] = - ## Returns the list of peerIds in a mesh defined by the passed pubsub topic. +): Result[HashSet[PubSubPeer], string] = + ## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic. ## The 'mesh' atribute is defined in the GossipSub ref object. if not w.mesh.hasKey(pubsubTopic): - debug "getPeersInMesh - there is no mesh peer for the given pubsub topic", + debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic", pubsubTopic = pubsubTopic - return ok(newSeq[PeerId]()) + return ok(initHashSet[PubSubPeer]()) let peersRes = catch: w.mesh[pubsubTopic] let peers: HashSet[PubSubPeer] = peersRes.valueOr: - return err("getPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg) + return err( + "getPubSubPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg + ) - let peerIds = toSeq(peers).mapIt(it.peerId) + return ok(peers) + +proc getPeersInMesh*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[seq[PeerId], string] = + ## Returns the list of peerIds in a mesh defined by the passed pubsub topic. + ## The 'mesh' atribute is defined in the GossipSub ref object. + let pubSubPeers = w.getPubSubPeersInMesh(pubsubTopic).valueOr: + return err(error) + let peerIds = toSeq(pubSubPeers).mapIt(it.peerId) return ok(peerIds) proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] = ## Returns the number of peers in a mesh defined by the passed pubsub topic. - let peers = w.getPeersInMesh(pubsubTopic).valueOr: + let peers = w.getPubSubPeersInMesh(pubsubTopic).valueOr: return err( "getNumPeersInMesh - failed retrieving peers in mesh: " & pubsubTopic & ": " & error @@ -557,18 +568,17 @@ proc publish*( return ok(relayedPeerCount) -proc getConnectedPeers*( +proc getConnectedPubSubPeers*( w: WakuRelay, pubsubTopic: PubsubTopic -): Result[seq[PeerId], string] = +): Result[HashSet[PubsubPeer], string] = ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic. ## The 'gossipsub' atribute is defined in the GossipSub ref object. if pubsubTopic == "": ## Return all the connected peers - var peerIds = newSeq[PeerId]() + var peerIds = initHashSet[PubsubPeer]() for k, v in w.gossipsub: - peerIds.add(toSeq(v).mapIt(it.peerId)) - # alternatively: peerIds &= toSeq(v).mapIt(it.peerId) + peerIds = peerIds + v return ok(peerIds) if not w.gossipsub.hasKey(pubsubTopic): @@ -584,6 +594,17 @@ proc getConnectedPeers*( return err("getConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg) + return ok(peers) + +proc getConnectedPeers*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[seq[PeerId], string] = + ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic. + ## The 'gossipsub' atribute is defined in the GossipSub ref object. + + let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr: + return err(error) + let peerIds = toSeq(peers).mapIt(it.peerId) return ok(peerIds) @@ -593,7 +614,7 @@ proc getNumConnectedPeers*( ## Returns the number of connected peers and subscribed to the passed pubsub topic. 
## Return all the connected peers - let peers = w.getConnectedPeers(pubsubTopic).valueOr: + let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr: return err( "getNumConnectedPeers - failed retrieving peers in mesh: " & pubsubTopic & ": " & error From 8394c15a1ad4b1bff2ae933fa34156ec48b1e32a Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Thu, 24 Apr 2025 08:36:30 +0200 Subject: [PATCH 31/48] fix: bad HttpCode conversion, add missing lightpush v3 rest api tests (#3389) * Fix bad HttpCode conversion, add missing lightpush v3 rest api tests --- tests/all_tests_waku.nim | 1 + tests/wakunode_rest/test_rest_lightpush.nim | 282 ++++++++++++++++++ .../test_rest_lightpush_legacy.nim | 25 -- waku/waku_api/rest/lightpush/handlers.nim | 26 +- waku/waku_lightpush/common.nim | 6 + 5 files changed, 303 insertions(+), 37 deletions(-) create mode 100644 tests/wakunode_rest/test_rest_lightpush.nim diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index f23f4249c..20da29fe2 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -99,6 +99,7 @@ import ./wakunode_rest/test_rest_relay_serdes, ./wakunode_rest/test_rest_serdes, ./wakunode_rest/test_rest_filter, + ./wakunode_rest/test_rest_lightpush, ./wakunode_rest/test_rest_lightpush_legacy, ./wakunode_rest/test_rest_admin, ./wakunode_rest/test_rest_cors, diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim new file mode 100644 index 000000000..2c4ec0959 --- /dev/null +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -0,0 +1,282 @@ +{.used.} + +import + std/sequtils, + stew/byteutils, + stew/shims/net, + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/crypto/crypto + +import + waku/[ + waku_api/message_cache, + waku_core, + waku_node, + node/peer_manager, + waku_lightpush/common, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/lightpush/types, + waku_api/rest/lightpush/handlers as lightpush_api, + waku_api/rest/lightpush/client as lightpush_api_client, + waku_relay, + common/rate_limit/setting, + ], + ../testlib/wakucore, + ../testlib/wakunode + +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +type RestLightPushTest = object + serviceNode: WakuNode + pushNode: WakuNode + consumerNode: WakuNode + restServer: WakuRestServerRef + restClient: RestClientRef + +proc init( + T: type RestLightPushTest, rateLimit: RateLimitSetting = (0, 0.millis) +): Future[T] {.async.} = + var testSetup = RestLightPushTest() + testSetup.serviceNode = testWakuNode() + testSetup.pushNode = testWakuNode() + testSetup.consumerNode = testWakuNode() + + await allFutures( + testSetup.serviceNode.start(), + testSetup.pushNode.start(), + testSetup.consumerNode.start(), + ) + + await testSetup.consumerNode.mountRelay() + await testSetup.serviceNode.mountRelay() + await testSetup.serviceNode.mountLightPush(rateLimit) + testSetup.pushNode.mountLightPushClient() + + testSetup.serviceNode.peerManager.addServicePeer( + testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec + ) + + await testSetup.serviceNode.connectToNodes( + @[testSetup.consumerNode.peerInfo.toRemotePeerInfo()] + ) + + testSetup.pushNode.peerManager.addServicePeer( + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), 
WakuLightPushCodec + ) + + var restPort = Port(0) + let restAddress = parseIpAddress("127.0.0.1") + testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = testSetup.restServer.httpServer.address.port + # update with bound port for restClient use + + installLightPushRequestHandler(testSetup.restServer.router, testSetup.pushNode) + + testSetup.restServer.start() + + testSetup.restClient = newRestHttpClient(initTAddress(restAddress, restPort)) + + return testSetup + +proc shutdown(self: RestLightPushTest) {.async.} = + await self.restServer.stop() + await self.restServer.closeWait() + await allFutures( + self.serviceNode.stop(), self.pushNode.stop(), self.consumerNode.stop() + ) + +suite "Waku v2 Rest API - lightpush": + asyncTest "Push message with proof": + let restLightPushTest = await RestLightPushTest.init() + + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, + payload = toBytes("TEST-1"), + proof = toBytes("proof-test"), + ) + .toRelayWakuMessage() + + check message.proof.isSome() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + + let response = + await restLightPushTest.restClient.sendPushRequest(body = requestBody) + + ## Validate that the push request failed because the node is not + ## connected to other node but, doesn't fail because of not properly + ## handling the proof message attribute within the REST request. + check: + response.status == 505 + response.data.statusDesc == some("No peers for topic, skipping publish") + response.data.relayPeerCount == none[uint32]() + + asyncTest "Push message request": + # Given + let restLightPushTest = await RestLightPushTest.init() + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + response.data.relayPeerCount == some(1.uint32) + + await restLightPushTest.shutdown() + + asyncTest "Push message bad-request": + # Given + let restLightPushTest = await RestLightPushTest.init() + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let badMessage1: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("") + ) + .toRelayWakuMessage() + let badRequestBody1 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage1) + + let badMessage2: RelayWakuMessage = + fakeWakuMessage(contentTopic = "", payload = toBytes("Sthg")).toRelayWakuMessage() + let badRequestBody2 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage2) + + let badRequestBody3 = + PushRequest(pubsubTopic: none(PubsubTopic), message: badMessage2) + + # var response: RestResponse[PushResponse] + + var response = await restLightPushTest.restClient.sendPushRequest(badRequestBody1) + + # Then + check: + response.status == 400 + 
response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + # when + response = await restLightPushTest.restClient.sendPushRequest(badRequestBody2) + + # Then + check: + response.status == 400 + response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + # when + response = await restLightPushTest.restClient.sendPushRequest(badRequestBody3) + + # Then + check: + response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + await restLightPushTest.shutdown() + + asyncTest "Request rate limit push message": + # Given + let budgetCap = 3 + let tokenPeriod = 500.millis + let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod)) + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic) + ) + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let pushProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + response.data.relayPeerCount == some(1.uint32) + + let pushRejectedProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 429 + response.data.statusDesc.isSome() # Ensure error status description is present + response.data.statusDesc.get().startsWith( + "Request rejected due to too many requests" + ) # Check specific error message + + await pushProc() + await pushProc() + await pushProc() + await pushRejectedProc() + + await sleepAsync(tokenPeriod) + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for sendCnt in 0 ..< budgetCap: + await pushProc() + + let endTime = Moment.now() + let elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + 10.millis) + + await restLightPushTest.shutdown() diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim index 61d1de88d..8176aed7a 100644 --- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -274,28 +274,3 @@ suite "Waku v2 Rest API - lightpush": await sleepAsync(tokenPeriod - elapsed + 10.millis) await restLightPushTest.shutdown() - - ## TODO: Re-work this test when lightpush protocol change is done: https://github.com/waku-org/pm/issues/93 - ## This test is similar when no available peer exists for publish. Currently it is returning success, - ## that makes this test not useful. 
- # asyncTest "Push message request service not available": - # # Given - # let restLightPushTest = await RestLightPushTest.init() - - # # When - # let message : RelayWakuMessage = fakeWakuMessage(contentTopic = DefaultContentTopic, - # payload = toBytes("TEST-1")).toRelayWakuMessage() - - # let requestBody = PushRequest(pubsubTopic: some("NoExistTopic"), - # message: message) - # let response = await restLightPushTest.client.sendPushRequest(requestBody) - - # echo "response", $response - - # # Then - # check: - # response.status == 503 - # $response.contentType == $MIMETYPE_TEXT - # response.data == "Failed to request a message push: Can not publish to any peers" - - # await restLightPushTest.shutdown() diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim index 601aab74c..cafcd89d2 100644 --- a/waku/waku_api/rest/lightpush/handlers.nim +++ b/waku/waku_api/rest/lightpush/handlers.nim @@ -26,18 +26,15 @@ logScope: const FutTimeoutForPushRequestProcessing* = 5.seconds -const NoPeerNoDiscoError = - RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method") - -const NoPeerNoneFoundError = - RestApiResponse.serviceUnavailable("No suitable service peer & none discovered") +const NoPeerNoDiscoError = "No suitable service peer & no discovery method" +const NoPeerNoneFoundError = "No suitable service peer & none discovered" proc useSelfHostedLightPush(node: WakuNode): bool = return node.wakuLightPush != nil and node.wakuLightPushClient == nil proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode = ## Lightpush status codes are matching HTTP status codes by design - return HttpCode(statusCode.int32) + return toHttpCode(statusCode.int).get(Http500) proc makeRestResponse(response: WakuLightPushResult): RestApiResponse = var httpStatus: HttpCode = Http200 @@ -72,10 +69,11 @@ proc installLightPushRequestHandler*( debug "post", ROUTE_LIGHTPUSH, contentBody let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr: - return RestApiResponse.badRequest("Invalid push request: " & $error) + return + makeRestResponse(lightpushResultBadRequest("Invalid push request! " & $error)) let msg = req.message.toWakuMessage().valueOr: - return RestApiResponse.badRequest("Invalid message: " & $error) + return makeRestResponse(lightpushResultBadRequest("Invalid message! " & $error)) var toPeer = none(RemotePeerInfo) if useSelfHostedLightPush(node): @@ -83,19 +81,23 @@ proc installLightPushRequestHandler*( else: let aPeer = node.peerManager.selectPeer(WakuLightPushCodec).valueOr: let handler = discHandler.valueOr: - return NoPeerNoDiscoError + return makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoDiscoError)) let peerOp = (await handler()).valueOr: - return RestApiResponse.internalServerError("No value in peerOp: " & $error) + return makeRestResponse( + lightpushResultInternalError("No value in peerOp: " & $error) + ) peerOp.valueOr: - return NoPeerNoneFoundError + return + makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoneFoundError)) toPeer = some(aPeer) let subFut = node.lightpushPublish(req.pubsubTopic, msg, toPeer) if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing): error "Failed to request a message push due to timeout!" 
- return RestApiResponse.serviceUnavailable("Push request timed out") + return + makeRestResponse(lightpushResultServiceUnavailable("Push request timed out")) return makeRestResponse(subFut.value()) diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index 356ccf8f2..4c2984e8f 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -42,6 +42,12 @@ func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult = func lightpushResultInternalError*(msg: string): WakuLightPushResult = return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg))) +func lightpushResultBadRequest*(msg: string): WakuLightPushResult = + return err((LightpushStatusCode.BAD_REQUEST, some(msg))) + +func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult = + return err((LightpushStatusCode.SERVICE_NOT_AVAILABLE, some(msg))) + func lighpushErrorResult*( statusCode: LightpushStatusCode, desc: Option[string] ): WakuLightPushResult = From 0c63ce4e9bd219092b7496d8b6b74b28b238ae3e Mon Sep 17 00:00:00 2001 From: Simon-Pierre Vivier Date: Thu, 24 Apr 2025 09:07:21 -0400 Subject: [PATCH 32/48] feat: refactor waku sync DOS protection (#3391) --- tests/waku_store_sync/sync_utils.nim | 6 ++--- tests/waku_store_sync/test_protocol.nim | 31 ++++++---------------- waku/node/waku_node.nim | 2 +- waku/waku_store_sync/reconciliation.nim | 18 +++++++------ waku/waku_store_sync/transfer.nim | 35 +++++++++---------------- 5 files changed, 34 insertions(+), 58 deletions(-) diff --git a/tests/waku_store_sync/sync_utils.nim b/tests/waku_store_sync/sync_utils.nim index a81ad6e2f..e7fd82b57 100644 --- a/tests/waku_store_sync/sync_utils.nim +++ b/tests/waku_store_sync/sync_utils.nim @@ -1,4 +1,4 @@ -import std/[options, random], chronos +import std/[options, random], chronos, chronicles import waku/[ @@ -23,7 +23,7 @@ proc randomHash*(rng: var Rand): WakuMessageHash = proc newTestWakuRecon*( switch: Switch, idsRx: AsyncQueue[SyncID], - wantsTx: AsyncQueue[(PeerId, Fingerprint)], + wantsTx: AsyncQueue[PeerId], needsTx: AsyncQueue[(PeerId, Fingerprint)], cluster: uint16 = 1, shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7], @@ -51,7 +51,7 @@ proc newTestWakuRecon*( proc newTestWakuTransfer*( switch: Switch, idsTx: AsyncQueue[SyncID], - wantsRx: AsyncQueue[(PeerId, Fingerprint)], + wantsRx: AsyncQueue[PeerId], needsRx: AsyncQueue[(PeerId, Fingerprint)], ): SyncTransfer = let peerManager = PeerManager.new(switch) diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim index df14de6a1..efdd6a885 100644 --- a/tests/waku_store_sync/test_protocol.nim +++ b/tests/waku_store_sync/test_protocol.nim @@ -27,7 +27,7 @@ suite "Waku Sync: reconciliation": var idsChannel {.threadvar.}: AsyncQueue[SyncID] - localWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + localWants {.threadvar.}: AsyncQueue[PeerId] remoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] var server {.threadvar.}: SyncReconciliation @@ -43,7 +43,7 @@ suite "Waku Sync: reconciliation": await allFutures(serverSwitch.start(), clientSwitch.start()) idsChannel = newAsyncQueue[SyncID]() - localWants = newAsyncQueue[(PeerId, WakuMessageHash)]() + localWants = newAsyncQueue[PeerId]() remoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds) @@ -61,7 +61,6 @@ suite "Waku Sync: reconciliation": asyncTest "sync 2 nodes both empty": check: idsChannel.len == 0 - localWants.len == 0 
remoteNeeds.len == 0 let res = await client.storeSynchronization(some(serverPeerInfo)) @@ -69,7 +68,6 @@ suite "Waku Sync: reconciliation": check: idsChannel.len == 0 - localWants.len == 0 remoteNeeds.len == 0 asyncTest "sync 2 nodes empty client full server": @@ -141,8 +139,6 @@ suite "Waku Sync: reconciliation": check: remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false - localWants.contains((clientPeerInfo.peerId, hash3)) == false - localWants.contains((serverPeerInfo.peerId, hash2)) == false var syncRes = await client.storeSynchronization(some(serverPeerInfo)) assert syncRes.isOk(), $syncRes.error @@ -150,8 +146,6 @@ suite "Waku Sync: reconciliation": check: remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == true remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == true - localWants.contains((clientPeerInfo.peerId, hash3)) == true - localWants.contains((serverPeerInfo.peerId, hash2)) == true asyncTest "sync 2 nodes different shards": let @@ -170,8 +164,6 @@ suite "Waku Sync: reconciliation": check: remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false - localWants.contains((clientPeerInfo.peerId, hash3)) == false - localWants.contains((serverPeerInfo.peerId, hash2)) == false server = await newTestWakuRecon( serverSwitch, idsChannel, localWants, remoteNeeds, shards = @[0.uint16, 1, 2, 3] @@ -185,7 +177,6 @@ suite "Waku Sync: reconciliation": check: remoteNeeds.len == 0 - localWants.len == 0 asyncTest "sync 2 nodes same hashes": let @@ -200,14 +191,12 @@ suite "Waku Sync: reconciliation": client.messageIngress(hash2, msg2) check: - localWants.len == 0 remoteNeeds.len == 0 let res = await client.storeSynchronization(some(serverPeerInfo)) assert res.isOk(), $res.error check: - localWants.len == 0 remoteNeeds.len == 0 asyncTest "sync 2 nodes 100K msgs 1 diff": @@ -236,14 +225,12 @@ suite "Waku Sync: reconciliation": timestamp += Timestamp(part) check: - localWants.contains((serverPeerInfo.peerId, WakuMessageHash(diff))) == false remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == false let res = await client.storeSynchronization(some(serverPeerInfo)) assert res.isOk(), $res.error check: - localWants.contains((serverPeerInfo.peerId, WakuMessageHash(diff))) == true remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == true asyncTest "sync 2 nodes 10K msgs 1K diffs": @@ -286,7 +273,6 @@ suite "Waku Sync: reconciliation": continue check: - localWants.len == 0 remoteNeeds.len == 0 let res = await client.storeSynchronization(some(serverPeerInfo)) @@ -294,7 +280,6 @@ suite "Waku Sync: reconciliation": # timimg issue make it hard to match exact numbers check: - localWants.len > 900 remoteNeeds.len > 900 suite "Waku Sync: transfer": @@ -310,10 +295,10 @@ suite "Waku Sync: transfer": var serverIds {.threadvar.}: AsyncQueue[SyncID] - serverLocalWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + serverLocalWants {.threadvar.}: AsyncQueue[PeerId] serverRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] clientIds {.threadvar.}: AsyncQueue[SyncID] - clientLocalWants {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + clientLocalWants {.threadvar.}: AsyncQueue[PeerId] clientRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] var @@ -341,7 +326,7 @@ suite "Waku Sync: transfer": clientPeerManager = PeerManager.new(clientSwitch) serverIds = newAsyncQueue[SyncID]() - serverLocalWants = 
newAsyncQueue[(PeerId, WakuMessageHash)]() + serverLocalWants = newAsyncQueue[PeerId]() serverRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() server = SyncTransfer.new( @@ -353,7 +338,7 @@ suite "Waku Sync: transfer": ) clientIds = newAsyncQueue[SyncID]() - clientLocalWants = newAsyncQueue[(PeerId, WakuMessageHash)]() + clientLocalWants = newAsyncQueue[PeerId]() clientRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() client = SyncTransfer.new( @@ -389,8 +374,8 @@ suite "Waku Sync: transfer": serverDriver = serverDriver.put(DefaultPubsubTopic, msgs) - # add server info and msg hash to client want channel - let want = (serverPeerInfo.peerId, hash) + # add server info to client want channel + let want = serverPeerInfo.peerId await clientLocalWants.put(want) # add client info and msg hash to server need channel diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index a544bdc80..ce86c3c57 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -212,7 +212,7 @@ proc mountStoreSync*( storeSyncRelayJitter = 20, ): Future[Result[void, string]] {.async.} = let idsChannel = newAsyncQueue[SyncID](0) - let wantsChannel = newAsyncQueue[(PeerId, WakuMessageHash)](0) + let wantsChannel = newAsyncQueue[PeerId](0) let needsChannel = newAsyncQueue[(PeerId, WakuMessageHash)](0) var cluster: uint16 diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index c08a9e434..d9912a3df 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -46,13 +46,10 @@ type SyncReconciliation* = ref object of LPProtocol storage: SyncStorage - # Receive IDs from transfer protocol for storage + # AsyncQueues are used as communication channels between + # reconciliation and transfer protocols. 
idsRx: AsyncQueue[SyncID] - - # Send Hashes to transfer protocol for reception - localWantsTx: AsyncQueue[(PeerId, WakuMessageHash)] - - # Send Hashes to transfer protocol for transmission + localWantsTx: AsyncQueue[PeerId] remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)] # params @@ -100,6 +97,9 @@ proc processRequest( roundTrips = 0 diffs = 0 + # Signal to transfer protocol that this reconciliation is starting + await self.localWantsTx.addLast(conn.peerId) + while true: let readRes = catch: await conn.readLp(int.high) @@ -143,7 +143,6 @@ proc processRequest( diffs.inc() for hash in hashToRecv: - self.localWantsTx.addLastNoWait((conn.peerId, hash)) diffs.inc() rawPayload = sendPayload.deltaEncode() @@ -168,6 +167,9 @@ proc processRequest( continue + # Signal to transfer protocol that this reconciliation is done + await self.localWantsTx.addLast(conn.peerId) + reconciliation_roundtrips.observe(roundTrips) reconciliation_differences.observe(diffs) @@ -296,7 +298,7 @@ proc new*( syncInterval: timer.Duration = DefaultSyncInterval, relayJitter: timer.Duration = DefaultGossipSubJitter, idsRx: AsyncQueue[SyncID], - localWantsTx: AsyncQueue[(PeerId, WakuMessageHash)], + localWantsTx: AsyncQueue[PeerId], remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)], ): Future[Result[T, string]] {.async.} = let res = await initFillStorage(syncRange, wakuArchive) diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim index 5a52cac9c..c1e5d3e37 100644 --- a/waku/waku_store_sync/transfer.nim +++ b/waku/waku_store_sync/transfer.nim @@ -37,9 +37,9 @@ type SyncTransfer* = ref object of LPProtocol idsTx: AsyncQueue[SyncID] # Receive Hashes from reconciliation protocol for reception - localWantsRx: AsyncQueue[(PeerId, WakuMessageHash)] + localWantsRx: AsyncQueue[PeerId] localWantsRxFut: Future[void] - inSessions: Table[PeerId, HashSet[WakuMessageHash]] + inSessions: HashSet[PeerId] # Receive Hashes from reconciliation protocol for transmission remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)] @@ -78,19 +78,14 @@ proc openConnection( return ok(conn) proc wantsReceiverLoop(self: SyncTransfer) {.async.} = - ## Waits for message hashes, - ## store the peers and hashes locally as - ## "supposed to be received" + ## Waits for peer ids of nodes + ## we are reconciliating with while true: # infinite loop - let (peerId, fingerprint) = await self.localWantsRx.popFirst() + let peerId = await self.localWantsRx.popFirst() - self.inSessions.withValue(peerId, value): - value[].incl(fingerprint) - do: - var hashes = initHashSet[WakuMessageHash]() - hashes.incl(fingerprint) - self.inSessions[peerId] = hashes + if self.inSessions.containsOrIncl(peerId): + self.inSessions.excl(peerId) return @@ -137,6 +132,10 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} = proc initProtocolHandler(self: SyncTransfer) = let handler = proc(conn: Connection, proto: string) {.async, closure.} = while true: + if not self.inSessions.contains(conn.peerId): + error "unwanted peer, disconnecting", remote = conn.peerId + break + let readRes = catch: await conn.readLp(int64(DefaultMaxWakuMessageSize)) @@ -157,16 +156,6 @@ proc initProtocolHandler(self: SyncTransfer) = let hash = computeMessageHash(pubsub, msg) - self.inSessions.withValue(conn.peerId, value): - if value[].missingOrExcl(hash): - error "unwanted hash received, disconnecting" - self.inSessions.del(conn.peerId) - break - do: - error "unwanted hash received, disconnecting" - self.inSessions.del(conn.peerId) - break - #TODO verify msg RLN proof... 
(await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr: @@ -193,7 +182,7 @@ proc new*( peerManager: PeerManager, wakuArchive: WakuArchive, idsTx: AsyncQueue[SyncID], - localWantsRx: AsyncQueue[(PeerId, WakuMessageHash)], + localWantsRx: AsyncQueue[PeerId], remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)], ): T = var transfer = SyncTransfer( From fc4ca7798cc8397ad5787e6d8cabcc1404a4fe46 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Fri, 25 Apr 2025 14:15:39 +0200 Subject: [PATCH 33/48] Added docker-quick-image / docker-quick-liteprotocoltester targets to build runable docker image from the locally build wakunode2 or liteprotocoltester - this speeds up build-test rounds (#3394) --- Makefile | 25 ++++++++++++++++++++++--- ci/Jenkinsfile.release | 2 +- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5eb893442..5da2d6076 100644 --- a/Makefile +++ b/Makefile @@ -40,8 +40,8 @@ ifeq ($(detected_OS),Windows) NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib" NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc" NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream" - - LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq + + LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)") endif @@ -83,7 +83,7 @@ HEAPTRACKER_INJECT ?= 0 ifeq ($(HEAPTRACKER), 1) # Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support -TARGET := prod-with-heaptrack +TARGET := heaptrack-build ifeq ($(HEAPTRACKER_INJECT), 1) # the Nim compiler will load 'libheaptrack_inject.so' @@ -340,6 +340,17 @@ docker-image: --target $(TARGET) \ --tag $(DOCKER_IMAGE_NAME) . +docker-quick-image: MAKE_TARGET ?= wakunode2 +docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION) +docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG) +docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm +docker-quick-image: | build deps librln wakunode2 + docker build \ + --build-arg="MAKE_TARGET=$(MAKE_TARGET)" \ + --tag $(DOCKER_IMAGE_NAME) \ + --file docker/binaries/Dockerfile.bn.amd64 \ + . + docker-push: docker push $(DOCKER_IMAGE_NAME) @@ -367,6 +378,14 @@ docker-liteprotocoltester: --file apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile \ . +docker-quick-liteprotocoltester: DOCKER_LPT_TAG ?= latest +docker-quick-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG) +docker-quick-liteprotocoltester: | liteprotocoltester + docker build \ + --tag $(DOCKER_LPT_NAME) \ + --file apps/liteprotocoltester/Dockerfile.liteprotocoltester \ + . + docker-liteprotocoltester-push: docker push $(DOCKER_LPT_NAME) diff --git a/ci/Jenkinsfile.release b/ci/Jenkinsfile.release index fcc353be8..1a2125402 100644 --- a/ci/Jenkinsfile.release +++ b/ci/Jenkinsfile.release @@ -78,7 +78,7 @@ pipeline { "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + - "--target=${params.HEAPTRACK ? "prod-with-heaptrack" : "prod"} ." + "--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ." 
) } } } From 2d6e5ef9ada5e12e29a4ea14cb008654c8a561e4 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 25 Apr 2025 14:52:37 +0200 Subject: [PATCH 34/48] chore: rln_relay simplify code a little (#3392) --- tests/waku_rln_relay/test_waku_rln_relay.nim | 12 ++++++------ waku/waku_rln_relay/rln_relay.nim | 14 +++++--------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index bc1c3f640..95ec7b4c7 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -722,13 +722,13 @@ suite "Waku rln relay": # validate messages # validateMessage proc checks the validity of the message fields and adds it to the log (if valid) let - msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1, some(time)) + msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1) # wm2 is published within the same Epoch as wm1 and should be found as spam - msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2, some(time)) + msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2) # a valid message should be validated successfully - msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3, some(time)) + msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3) # wm4 has no rln proof and should not be validated - msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4, some(time)) + msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4) check: msgValidate1 == MessageValidationResult.Valid @@ -778,9 +778,9 @@ suite "Waku rln relay": # validate messages # validateMessage proc checks the validity of the message fields and adds it to the log (if valid) let - msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1, some(time)) + msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1) # since this message is from a different sender, it should be validated successfully - msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2, some(time)) + msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2) check: msgValidate1 == MessageValidationResult.Valid diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index c3f3903f9..b48d6894e 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -184,7 +184,7 @@ proc absDiff*(e1, e2: Epoch): uint64 = return epoch2 - epoch1 proc validateMessage*( - rlnPeer: WakuRLNRelay, msg: WakuMessage, timeOption = none(float64) + rlnPeer: WakuRLNRelay, msg: WakuMessage ): MessageValidationResult = ## validate the supplied `msg` based on the waku-rln-relay routing protocol i.e., ## the `msg`'s epoch is within MaxEpochGap of the current epoch @@ -204,12 +204,8 @@ proc validateMessage*( # checks if the `msg`'s epoch is far from the current epoch # it corresponds to the validation of rln external nullifier - var epoch: Epoch - if timeOption.isSome(): - epoch = rlnPeer.calcEpoch(timeOption.get()) - else: - # get current rln epoch - epoch = rlnPeer.getCurrentEpoch() + # get current rln epoch + let epoch: Epoch = rlnPeer.getCurrentEpoch() let msgEpoch = proof.epoch @@ -273,12 +269,12 @@ proc validateMessage*( return MessageValidationResult.Valid proc validateMessageAndUpdateLog*( - rlnPeer: WakuRLNRelay, msg: WakuMessage, timeOption = none(float64) + rlnPeer: WakuRLNRelay, msg: WakuMessage ): MessageValidationResult = ## validates the message and updates the log to prevent double messaging ## 
in future messages - let isValidMessage = rlnPeer.validateMessage(msg, timeOption) + let isValidMessage = rlnPeer.validateMessage(msg) let decodeRes = RateLimitProof.init(msg.proof) if decodeRes.isErr(): From 98c3979119383a77c4caee650f357407412b8096 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Fri, 25 Apr 2025 15:36:41 +0200 Subject: [PATCH 35/48] chore: return all peers from rest admin (#3395) * Updated version of getting peers by /admin endpoints --- tests/wakunode_rest/test_rest_admin.nim | 10 +- waku/waku_api/rest/admin/client.nim | 14 ++- waku/waku_api/rest/admin/handlers.nim | 154 ++++++++++++------------ 3 files changed, 91 insertions(+), 87 deletions(-) diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index bdab61a75..a3546f1f8 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -120,7 +120,9 @@ suite "Waku v2 Rest API - Admin": check: getRes.status == 200 $getRes.contentType == $MIMETYPE_JSON - getRes.data.len() == 0 + getRes.data.len() == 1 + getRes.data[0].multiaddr == nonExistentPeer + getRes.data[0].connected == CannotConnect asyncTest "Get filter data": await allFutures( @@ -274,7 +276,7 @@ suite "Waku v2 Rest API - Admin": check: postRes.status == 200 - let getRes = await client.getConnectedRelayPeers() + let getRes = await client.getRelayPeers() check: getRes.status == 200 @@ -286,13 +288,13 @@ suite "Waku v2 Rest API - Admin": # Check peer 3 # Todo: investigate why the test setup missing remote peer's shard info - # let getRes2 = await client.getConnectedRelayPeersByShard(0) + # let getRes2 = await client.getRelayPeersByShard(0) # check: # getRes2.status == 200 # $getRes2.contentType == $MIMETYPE_JSON # getRes2.data.peers.len() == 2 - let getRes3 = await client.getConnectedRelayPeersByShard(99) + let getRes3 = await client.getRelayPeersByShard(99) check: getRes3.status == 200 $getRes3.contentType == $MIMETYPE_JSON diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim index 4b46ca136..7d45544e2 100644 --- a/waku/waku_api/rest/admin/client.nim +++ b/waku/waku_api/rest/admin/client.nim @@ -28,6 +28,10 @@ proc getPeerById*( rest, endpoint: "/admin/v1/peer/{peerId}", meth: HttpMethod.MethodGet .} +proc getServicePeers*(): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers/service", meth: HttpMethod.MethodGet +.} + proc getConnectedPeers*(): RestResponse[seq[WakuPeer]] {. rest, endpoint: "/admin/v1/peers/connected", meth: HttpMethod.MethodGet .} @@ -38,16 +42,14 @@ proc getConnectedPeersByShard*( rest, endpoint: "/admin/v1/peers/connected/on/{shardId}", meth: HttpMethod.MethodGet .} -proc getConnectedRelayPeers*(): RestResponse[PeersOfShards] {. - rest, endpoint: "/admin/v1/peers/connected/relay", meth: HttpMethod.MethodGet +proc getRelayPeers*(): RestResponse[PeersOfShards] {. + rest, endpoint: "/admin/v1/peers/relay", meth: HttpMethod.MethodGet .} -proc getConnectedRelayPeersByShard*( +proc getRelayPeersByShard*( shardId: uint16 ): RestResponse[PeersOfShard] {. - rest, - endpoint: "/admin/v1/peers/connected/relay/on/{shardId}", - meth: HttpMethod.MethodGet + rest, endpoint: "/admin/v1/peers/relay/on/{shardId}", meth: HttpMethod.MethodGet .} proc getMeshPeers*(): RestResponse[PeersOfShards] {. 
diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index ada60e870..9cf6ec131 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -34,12 +34,13 @@ logScope: const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}" +const ROUTE_ADMIN_V1_SERVICE_PEERS* = "/admin/v1/peers/service" # returns all peers + const ROUTE_ADMIN_V1_CONNECTED_PEERS* = "/admin/v1/peers/connected" const ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD* = "/admin/v1/peers/connected/on/{shardId}" -const ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS* = "/admin/v1/peers/connected/relay" -const ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS_ON_SHARD* = - "/admin/v1/peers/connected/relay/on/{shardId}" +const ROUTE_ADMIN_V1_RELAY_PEERS* = "/admin/v1/peers/relay" +const ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD* = "/admin/v1/peers/relay/on/{shardId}" const ROUTE_ADMIN_V1_MESH_PEERS* = "/admin/v1/peers/mesh" const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}" @@ -62,46 +63,48 @@ proc tuplesToWakuPeers(peers: var WakuPeers, peersTup: seq[PeerProtocolTuple]) = peer.origin, ) -proc populateAdminPeerInfo(peers: var WakuPeers, node: WakuNode, codec: string) = - let peersForCodec = node.peerManager.switch.peerStore.peers(codec).mapIt( - ( - multiaddr: constructMultiaddrStr(it), - protocol: codec, - shards: it.getShards(), - connected: it.connectedness, - agent: it.agent, - origin: it.origin, +proc populateAdminPeerInfo( + peers: var WakuPeers, node: WakuNode, codec: Option[string] = none[string]() +) = + if codec.isNone(): + peers = node.peerManager.switch.peerStore.peers().mapIt(WakuPeer.init(it)) + else: + let peersTuples = node.peerManager.switch.peerStore.peers(codec.get()).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: codec.get(), + shards: it.getShards(), + connected: it.connectedness, + agent: it.agent, + origin: it.origin, + ) ) - ) - tuplesToWakuPeers(peers, peersForCodec) + tuplesToWakuPeers(peers, peersTuples) + +proc populateAdminPeerInfoForAll(node: WakuNode): WakuPeers = + var peers: WakuPeers = @[] + populateAdminPeerInfo(peers, node) + return peers proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPeers = var peers: WakuPeers = @[] for codec in codecs: - populateAdminPeerInfo(peers, node, codec) + populateAdminPeerInfo(peers, node, some(codec)) return peers proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: - let peers = populateAdminPeerInfoForCodecs( - node, - @[ - WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, - WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, - WakuReconciliationCodec, - ], - ) + let peers = populateAdminPeerInfoForAll(node) - let resp = RestApiResponse.jsonResponse(peers, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp router.api(MethodGet, ROUTE_ADMIN_V1_SINGLE_PEER) do( peerId: string @@ -115,19 +118,18 @@ proc 
installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = if node.peerManager.switch.peerStore.peerExists(peerIdVal): let peerInfo = node.peerManager.switch.peerStore.getPeer(peerIdVal) let peer = WakuPeer.init(peerInfo) - let resp = RestApiResponse.jsonResponse(peer, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(peer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp else: return RestApiResponse.notFound(fmt("Peer with ID {peerId} not found")) - router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS) do() -> RestApiResponse: - let allPeers = populateAdminPeerInfoForCodecs( + router.api(MethodGet, ROUTE_ADMIN_V1_SERVICE_PEERS) do() -> RestApiResponse: + let peers = populateAdminPeerInfoForCodecs( node, @[ WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, @@ -136,16 +138,26 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ], ) - let connectedPeers = allPeers.filterIt(it.connected == Connectedness.Connected) - - let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS) do() -> RestApiResponse: + let allPeers = populateAdminPeerInfoForAll(node) + + let connectedPeers = allPeers.filterIt(it.connected == Connectedness.Connected) + + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD) do( shardId: uint16 @@ -153,29 +165,21 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = let shard = shardId.valueOr: return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) - let allPeers = populateAdminPeerInfoForCodecs( - node, - @[ - WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, - WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, - WakuReconciliationCodec, - ], - ) + let allPeers = populateAdminPeerInfoForAll(node) let connectedPeers = allPeers.filterIt( it.connected == Connectedness.Connected and it.shards.contains(shard) ) - let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error 
occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp - router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS) do() -> RestApiResponse: + router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS) do() -> RestApiResponse: if node.wakuRelay.isNil(): return RestApiResponse.serviceUnavailable( "Error: Relay Protocol is not mounted to the node" @@ -195,16 +199,15 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) ) - let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp - router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_RELAY_PEERS_ON_SHARD) do( + router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD) do( shardId: uint16 ) -> RestApiResponse: let shard = shardId.valueOr: @@ -223,14 +226,13 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = shard: shard, peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)) ) - let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS) do() -> RestApiResponse: if node.wakuRelay.isNil(): @@ -252,14 +254,13 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) ) - let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while building the json response: {error}") ) - return resp.get() + return resp router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD) do( shardId: uint16 @@ -280,14 +281,13 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = shard: shard, peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)) ) - let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200) - if resp.isErr(): - error "An error occurred while building the json response: ", error = resp.error + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( - fmt("An error occurred while building the json response: {resp.error}") + fmt("An error occurred while 
building the json response: {error}") ) - return resp.get() + return resp proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) = router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do( From 7c7ed5634f108705cc5fb3b427dcbf47860352f0 Mon Sep 17 00:00:00 2001 From: gabrielmer <101006718+gabrielmer@users.noreply.github.com> Date: Fri, 25 Apr 2025 20:23:53 +0300 Subject: [PATCH 36/48] chore: improve disconnection handling (#3385) --- waku/factory/builder.nim | 5 ++- waku/factory/internal_config.nim | 1 + waku/factory/waku.nim | 16 +++++++++ waku/node/config.nim | 3 ++ waku/node/peer_manager/peer_manager.nim | 46 +++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 1 deletion(-) diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim index caa84db63..78b07ed9b 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -196,6 +196,8 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = except CatchableError: return err("failed to create switch: " & getCurrentExceptionMsg()) + let netConfig = builder.netConfig.get() + let peerManager = PeerManager.new( switch = switch, storage = builder.peerStorage.get(nil), @@ -203,12 +205,13 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = maxServicePeers = some(builder.maxServicePeers), colocationLimit = builder.colocationLimit, shardedPeerManagement = builder.shardAware, + dnsNameServers = netConfig.dnsNameServers, ) var node: WakuNode try: node = WakuNode.new( - netConfig = builder.netConfig.get(), + netConfig = netConfig, enr = builder.record.get(), switch = switch, peerManager = peerManager, diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim index 08f11f1c5..b5275d00b 100644 --- a/waku/factory/internal_config.nim +++ b/waku/factory/internal_config.nim @@ -155,6 +155,7 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul dns4DomainName = dns4DomainName, discv5UdpPort = discv5UdpPort, wakuFlags = some(wakuFlags), + dnsNameServers = conf.dnsAddrsNameServers, ) return netConfigRes diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 9760d1580..c40db3b54 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -59,6 +59,7 @@ type Waku* = ref object wakuDiscv5*: WakuDiscoveryV5 dynamicBootstrapNodes: seq[RemotePeerInfo] dnsRetryLoopHandle: Future[void] + networkConnLoopHandle: Future[void] discoveryMngr: DiscoveryManager node*: WakuNode @@ -363,6 +364,15 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg() return +# The network connectivity loop checks periodically whether the node is online or not +# and triggers any change that depends on the network connectivity state +proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} = + while true: + await sleepAsync(15.seconds) + + # Update online state + await waku.node.peerManager.updateOnlineState() + proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = debug "Retrieve dynamic bootstrap nodes" @@ -400,6 +410,9 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = if not waku[].deliveryMonitor.isNil(): waku[].deliveryMonitor.startDeliveryMonitor() + # Start network connectivity check loop + waku[].networkConnLoopHandle = waku[].startNetworkConnectivityLoop() + return ok() # Waku shutdown @@ -411,6 +424,9 @@ proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} = if not 
waku.metricsServer.isNil(): await waku.metricsServer.stop() + if not waku.networkConnLoopHandle.isNil(): + await waku.networkConnLoopHandle.cancelAndWait() + if not waku.wakuDiscv5.isNil(): await waku.wakuDiscv5.stop() diff --git a/waku/node/config.nim b/waku/node/config.nim index 311e26771..51aadb48d 100644 --- a/waku/node/config.nim +++ b/waku/node/config.nim @@ -15,6 +15,7 @@ type NetConfig* = object extIp*: Option[IpAddress] extPort*: Option[Port] dns4DomainName*: Option[string] + dnsNameServers*: seq[IpAddress] announcedAddresses*: seq[MultiAddress] extMultiAddrs*: seq[MultiAddress] enrMultiAddrs*: seq[MultiAddress] @@ -75,6 +76,7 @@ proc init*( discv5UdpPort = none(Port), clusterId: uint16 = 0, wakuFlags = none(CapabilitiesBitfield), + dnsNameServers = newSeq[IpAddress](), ): NetConfigResult = ## Initialize and validate waku node network configuration @@ -165,6 +167,7 @@ proc init*( extPort: extPort, wssEnabled: wssEnabled, dns4DomainName: dns4DomainName, + dnsNameServers: dnsNameServers, announcedAddresses: announcedAddresses, extMultiAddrs: extMultiAddrs, enrMultiaddrs: enrMultiaddrs, diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 602718d5d..75c72449a 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -8,6 +8,7 @@ import libp2p/multistream, libp2p/muxers/muxer, libp2p/nameresolving/nameresolver, + libp2p/nameresolving/dnsresolver, libp2p/peerstore import @@ -73,6 +74,8 @@ const # Max peers that we allow from the same IP DefaultColocationLimit* = 5 + DNSCheckDomain = "one.one.one.one" + type ConnectionChangeHandler* = proc( peerId: PeerId, peerEvent: PeerEventKind ): Future[void] {.gcsafe, raises: [Defect].} @@ -95,11 +98,16 @@ type PeerManager* = ref object of RootObj started: bool shardedPeerManagement: bool # temp feature flag onConnectionChange*: ConnectionChangeHandler + dnsNameServers*: seq[IpAddress] + online: bool #~~~~~~~~~~~~~~~~~~~# # Helper Functions # #~~~~~~~~~~~~~~~~~~~# +template isOnline*(self: PeerManager): bool = + self.online + proc calculateBackoff( initialBackoffInSec: int, backoffFactor: int, failedAttempts: int ): timer.Duration = @@ -535,7 +543,38 @@ proc getStreamByPeerIdAndProtocol*( return ok(streamRes.get()) +proc checkInternetConnectivity( + nameServerIps: seq[IpAddress], timeout = 2.seconds +): Future[bool] {.async.} = + var nameServers: seq[TransportAddress] + for ip in nameServerIps: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + # Resolve domain IP + let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC) + + if resolved.len > 0: + return true + else: + return false + +proc updateOnlineState*(pm: PeerManager) {.async.} = + let numConnectedPeers = + pm.switch.peerStore.peers().countIt(it.connectedness == Connected) + + if numConnectedPeers > 0: + pm.online = true + else: + pm.online = await checkInternetConnectivity(pm.dnsNameServers) + proc connectToRelayPeers*(pm: PeerManager) {.async.} = + # only attempt if current node is online + if not pm.isOnline(): + error "connectToRelayPeers: won't attempt new connections - node is offline" + return + var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) let totalRelayPeers = inRelayPeers.len + outRelayPeers.len @@ -778,6 +817,10 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} = if pm.wakuMetadata.shards.len == 0: return + if not pm.isOnline(): + error 
"manageRelayPeers: won't attempt new connections - node is offline" + return + var peersToConnect: HashSet[PeerId] # Can't use RemotePeerInfo as they are ref objects var peersToDisconnect: int @@ -1005,6 +1048,7 @@ proc new*( maxFailedAttempts = MaxFailedAttempts, colocationLimit = DefaultColocationLimit, shardedPeerManagement = false, + dnsNameServers = newSeq[IpAddress](), ): PeerManager {.gcsafe.} = let capacity = switch.peerStore.capacity let maxConnections = switch.connManager.inSema.size @@ -1055,6 +1099,8 @@ proc new*( maxFailedAttempts: maxFailedAttempts, colocationLimit: colocationLimit, shardedPeerManagement: shardedPeerManagement, + dnsNameServers: dnsNameServers, + online: true, ) proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} = From 6bc05efc02ab8984643032b90e44c032075d619f Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Mon, 5 May 2025 22:57:20 +0200 Subject: [PATCH 37/48] chore: Avoid double relay subscription (#3396) * make sure subscribe once to every topic in waku_node * start suggest use of removeValidator in waku_relay/protocol. Commented until libp2p updated. --- apps/chat2/chat2.nim | 8 +- apps/chat2bridge/chat2bridge.nim | 9 +- apps/networkmonitor/networkmonitor.nim | 9 +- examples/publisher.nim | 5 +- examples/subscriber.nim | 8 +- .../stealth_commitment_protocol.nim | 4 +- .../requests/protocols/relay_request.nim | 17 +- tests/node/peer_manager/test_peer_manager.nim | 18 ++- tests/node/test_wakunode_filter.nim | 9 +- tests/node/test_wakunode_legacy_lightpush.nim | 22 ++- tests/node/test_wakunode_lightpush.nim | 20 ++- tests/node/test_wakunode_peer_manager.nim | 36 +++-- tests/node/test_wakunode_relay_rln.nim | 8 +- tests/test_peer_manager.nim | 27 ++-- tests/test_relay_peer_exchange.nim | 15 +- tests/test_waku_dnsdisc.nim | 12 +- tests/test_waku_keepalive.nim | 6 +- tests/test_wakunode.nim | 39 +++-- tests/waku_relay/test_wakunode_relay.nim | 145 +++++++++++++----- tests/waku_relay/utils.nim | 15 +- .../test_wakunode_rln_relay.nim | 89 ++++++++--- tests/waku_rln_relay/utils_static.nim | 6 +- tests/wakunode_rest/test_rest_cors.nim | 12 +- tests/wakunode_rest/test_rest_debug.nim | 6 +- tests/wakunode_rest/test_rest_filter.nim | 13 +- tests/wakunode_rest/test_rest_health.nim | 3 +- tests/wakunode_rest/test_rest_lightpush.nim | 23 ++- .../test_rest_lightpush_legacy.nim | 23 ++- tests/wakunode_rest/test_rest_relay.nim | 57 +++++-- tests/wakunode_rest/test_rest_store.nim | 18 ++- waku/factory/node_factory.nim | 6 +- waku/node/waku_node.nim | 68 ++++---- waku/waku_api/rest/builder.nim | 22 ++- waku/waku_api/rest/relay/handlers.nim | 22 ++- waku/waku_relay/protocol.nim | 47 ++++-- 35 files changed, 596 insertions(+), 251 deletions(-) diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index 3723291e3..b28724357 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -381,7 +381,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.relay: let shards = conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it))) - await node.mountRelay(shards) + (await node.mountRelay(shards)).isOkOr: + echo "failed to mount relay: " & error + return await node.mountLibp2pPing() @@ -535,7 +537,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = node.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler)) - ) + ).isOkOr: + error "failed to subscribe to pubsub topic", + topic = DefaultPubsubTopic, error = 
error if conf.rlnRelay: info "WakuRLNRelay is enabled" diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim index 96782360b..7a7a5d08f 100644 --- a/apps/chat2bridge/chat2bridge.nim +++ b/apps/chat2bridge/chat2bridge.nim @@ -215,7 +215,10 @@ proc start*(cmb: Chat2MatterBridge) {.async.} = # Always mount relay for bridge # `triggerSelf` is false on a `bridge` to avoid duplicates - await cmb.nodev2.mountRelay() + (await cmb.nodev2.mountRelay()).isOkOr: + error "failed to mount relay", error = error + return + cmb.nodev2.wakuRelay.triggerSelf = false # Bridging @@ -229,7 +232,9 @@ proc start*(cmb: Chat2MatterBridge) {.async.} = except: error "exception in relayHandler: " & getCurrentExceptionMsg() - cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error + return proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} = info "Stopping Chat2MatterBridge" diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index 2861c85ae..d5d7e6bcf 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -554,7 +554,9 @@ proc subscribeAndHandleMessages( else: msgPerContentTopic[msg.contentTopic] = 1 - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))) + node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr: + error "failed to subscribe to pubsub topic", pubsubTopic, error + quit(1) when isMainModule: # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError @@ -619,7 +621,10 @@ when isMainModule: let (node, discv5) = nodeRes.get() - waitFor node.mountRelay() + (waitFor node.mountRelay()).isOkOr: + error "failed to mount waku relay protocol: ", err = error + quit 1 + waitFor node.mountLibp2pPing() var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = diff --git a/examples/publisher.nim b/examples/publisher.nim index 5b1ca9f18..907ce2274 100644 --- a/examples/publisher.nim +++ b/examples/publisher.nim @@ -86,7 +86,10 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = ) await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + quit(1) + node.peerManager.start() (await wakuDiscv5.start()).isOkOr: diff --git a/examples/subscriber.nim b/examples/subscriber.nim index 90440aabc..633bfa4ca 100644 --- a/examples/subscriber.nim +++ b/examples/subscriber.nim @@ -84,7 +84,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = ) await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + quit(1) node.peerManager.start() (await wakuDiscv5.start()).isOkOr: @@ -118,7 +120,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = contentTopic = msg.contentTopic, timestamp = msg.timestamp - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))) + node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr: + error "failed to subscribe to pubsub topic", pubsubTopic, error + quit(1) when isMainModule: let rng = crypto.newRng() diff --git a/examples/wakustealthcommitments/stealth_commitment_protocol.nim 
b/examples/wakustealthcommitments/stealth_commitment_protocol.nim index c6e6d6b9c..7da6bff56 100644 --- a/examples/wakustealthcommitments/stealth_commitment_protocol.nim +++ b/examples/wakustealthcommitments/stealth_commitment_protocol.nim @@ -187,5 +187,7 @@ proc new*( except CatchableError: error "could not handle SCP message: ", err = getCurrentExceptionMsg() - waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)) + waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr: + error "could not subscribe to pubsub topic: ", err = $error + return err("could not subscribe to pubsub topic: " & $error) return ok(SCP) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 97f01488a..6a437122a 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -7,6 +7,7 @@ import ../../../../../waku/waku_core/message, ../../../../../waku/waku_core/time, # Timestamp ../../../../../waku/waku_core/topics/pubsub_topic, + ../../../../../waku/waku_core/topics, ../../../../../waku/waku_relay/protocol, ../../../../../waku/node/peer_manager, ../../../../alloc @@ -108,12 +109,18 @@ proc process*( case self.operation of SUBSCRIBE: - # TO DO: properly perform 'subscribe' - waku.node.registerRelayDefaultHandler($self.pubsubTopic) - discard waku.node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback) + waku.node.subscribe( + (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic), + handler = some(self.relayEventCallback), + ).isOkOr: + let errorMsg = "Subscribe failed:" & $error + error "SUBSCRIBE failed", error = errorMsg + return err(errorMsg) of UNSUBSCRIBE: - # TODO: properly perform 'unsubscribe' - waku.node.wakuRelay.unsubscribeAll($self.pubsubTopic) + waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr: + let errorMsg = "Unsubscribe failed:" & $error + error "UNSUBSCRIBE failed", error = errorMsg + return err(errorMsg) of PUBLISH: let msg = self.message.toWakuMessage() let pubsubTopic = $self.pubsubTopic diff --git a/tests/node/peer_manager/test_peer_manager.nim b/tests/node/peer_manager/test_peer_manager.nim index 57acf13df..6eddda0d6 100644 --- a/tests/node/peer_manager/test_peer_manager.nim +++ b/tests/node/peer_manager/test_peer_manager.nim @@ -76,8 +76,10 @@ suite "Peer Manager": # And both mount metadata and relay discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic - await client.mountRelay() - await server.mountRelay() + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # And both nodes are started await allFutures(server.start(), client.start()) @@ -89,7 +91,8 @@ suite "Peer Manager": await sleepAsync(FUTURE_TIMEOUT) # When making an operation that triggers onPeerMetadata - client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")) + client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr: + assert false, "Failed to subscribe to relay" await sleepAsync(FUTURE_TIMEOUT) check: @@ -109,8 +112,10 @@ suite "Peer Manager": # And both mount metadata and relay discard client.mountMetadata(0) # 
clusterId irrelevant, overridden by topic discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic - await client.mountRelay() - await server.mountRelay() + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # And both nodes are started await allFutures(server.start(), client.start()) @@ -122,7 +127,8 @@ suite "Peer Manager": await sleepAsync(FUTURE_TIMEOUT) # When making an operation that triggers onPeerMetadata - client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")) + client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr: + assert false, "Failed to subscribe to relay" await sleepAsync(FUTURE_TIMEOUT) check: diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim index 83c486a7e..bf9f2495b 100644 --- a/tests/node/test_wakunode_filter.nim +++ b/tests/node/test_wakunode_filter.nim @@ -135,7 +135,8 @@ suite "Waku Filter - End to End": asyncTest "Client Node can't receive Push from Server Node, via Relay": # Given the server node has Relay enabled - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error # And valid filter subscription let subscribeResponse = await client.filterSubscribe( @@ -159,7 +160,8 @@ suite "Waku Filter - End to End": server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() @@ -222,7 +224,8 @@ suite "Waku Filter - End to End": pushedMsg == msg asyncTest "Filter Client Node can't receive messages after subscribing and restarting, via Relay": - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error # Given a valid filter subscription let subscribeResponse = await client.filterSubscribe( diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index 4ff9c7f00..e19d29c64 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -52,7 +52,9 @@ suite "Waku Legacy Lightpush - End To End": await allFutures(server.start(), client.start()) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.mountLegacyLightpush() # without rln-relay client.mountLegacyLightpushClient() @@ -142,7 +144,8 @@ suite "RLN Proofs as a Lightpush Service": await allFutures(server.start(), client.start()) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.mountRlnRelay(wakuRlnConfig) await server.mountLegacyLightPush() client.mountLegacyLightPushClient() @@ -187,8 +190,10 @@ suite "Waku Legacy Lightpush message delivery": await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) - await destNode.mountRelay(@[DefaultRelayShard]) - await bridgeNode.mountRelay(@[DefaultRelayShard]) + (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" + (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" await bridgeNode.mountLegacyLightPush() lightNode.mountLegacyLightPushClient() @@ -199,24 +204,25 @@ suite "Waku Legacy 
Lightpush message delivery": await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) ## Given + const CustomPubsubTopic = "/waku/2/rs/0/1" let message = fakeWakuMessage() - var completionFutRelay = newFuture[bool]() proc relayHandler( topic: PubsubTopic, msg: WakuMessage ): Future[void] {.async, gcsafe.} = check: - topic == DefaultPubsubTopic + topic == CustomPubsubTopic msg == message completionFutRelay.complete(true) - destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic:" & $error # Wait for subscription to take effect await sleepAsync(100.millis) ## When - let res = await lightNode.legacyLightpushPublish(some(DefaultPubsubTopic), message) + let res = await lightNode.legacyLightpushPublish(some(CustomPubsubTopic), message) assert res.isOk(), $res.error ## Then diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 2e785e368..72e9b8bf3 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -46,7 +46,8 @@ suite "Waku Lightpush - End To End": await allFutures(server.start(), client.start()) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.mountLightpush() # without rln-relay client.mountLightpushClient() @@ -137,7 +138,8 @@ suite "RLN Proofs as a Lightpush Service": await allFutures(server.start(), client.start()) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.mountRlnRelay(wakuRlnConfig) await server.mountLightPush() client.mountLightPushClient() @@ -182,8 +184,10 @@ suite "Waku Lightpush message delivery": await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) - await destNode.mountRelay(@[DefaultRelayShard]) - await bridgeNode.mountRelay(@[DefaultRelayShard]) + (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" + (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" await bridgeNode.mountLightPush() lightNode.mountLightPushClient() @@ -194,6 +198,7 @@ suite "Waku Lightpush message delivery": await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) ## Given + const CustomPubsubTopic = "/waku/2/rs/0/1" let message = fakeWakuMessage() var completionFutRelay = newFuture[bool]() @@ -201,17 +206,18 @@ suite "Waku Lightpush message delivery": topic: PubsubTopic, msg: WakuMessage ): Future[void] {.async, gcsafe.} = check: - topic == DefaultPubsubTopic + topic == CustomPubsubTopic msg == message completionFutRelay.complete(true) - destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to relay" # Wait for subscription to take effect await sleepAsync(100.millis) ## When - let res = await lightNode.lightpushPublish(some(DefaultPubsubTopic), message) + let res = await lightNode.lightpushPublish(some(CustomPubsubTopic), message) assert res.isOk(), $res.error assert res.get() == 1, "Expected to relay the message to 1 node" diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim index e37b3e108..88fcc827f 100644 --- 
a/tests/node/test_wakunode_peer_manager.nim +++ b/tests/node/test_wakunode_peer_manager.nim @@ -308,7 +308,8 @@ suite "Peer Manager": asyncTest "Peer Protocol Support Verification (Before Connection)": # Given the server has mounted some Waku protocols - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.mountFilter() # When connecting to the server @@ -335,7 +336,8 @@ suite "Peer Manager": server2RemotePeerInfo = server2.switch.peerInfo.toRemotePeerInfo() server2PeerId = server2RemotePeerInfo.peerId - await server2.mountRelay() + (await server2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # When connecting to both servers await client.connectToNodes(@[serverRemotePeerInfo, server2RemotePeerInfo]) @@ -533,8 +535,10 @@ suite "Peer Manager": suite "Peer Connectivity States": asyncTest "State Tracking & Transition": # Given two correctly initialised nodes, but not connected - await server.mountRelay() - await client.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # Then their connectedness should be NotConnected check: @@ -587,8 +591,10 @@ suite "Peer Manager": suite "Automatic Reconnection": asyncTest "Automatic Reconnection Implementation": # Given two correctly initialised nodes, that are available for reconnection - await server.mountRelay() - await client.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await client.connectToNodes(@[serverRemotePeerInfo]) waitActive: @@ -810,7 +816,8 @@ suite "Mount Order": serverKey = generateSecp256k1Key() server = newTestWakuNode(serverKey, listenIp, listenPort) - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.start() let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() @@ -834,7 +841,8 @@ suite "Mount Order": serverKey = generateSecp256k1Key() server = newTestWakuNode(serverKey, listenIp, listenPort) - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() serverPeerId = serverRemotePeerInfo.peerId @@ -859,7 +867,8 @@ suite "Mount Order": server = newTestWakuNode(serverKey, listenIp, listenPort) await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() serverPeerId = serverRemotePeerInfo.peerId @@ -886,7 +895,8 @@ suite "Mount Order": let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() serverPeerId = serverRemotePeerInfo.peerId - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # When connecting to the server await client.connectToNodes(@[serverRemotePeerInfo]) @@ -910,7 +920,8 @@ suite "Mount Order": serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() serverPeerId = serverRemotePeerInfo.peerId await server.start() - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # When connecting to the server await client.connectToNodes(@[serverRemotePeerInfo]) @@ -932,7 +943,8 @@ suite "Mount Order": let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() 
serverPeerId = serverRemotePeerInfo.peerId - await server.mountRelay() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await server.start() # When connecting to the server diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim index 0bf608d12..45dc6ce37 100644 --- a/tests/node/test_wakunode_relay_rln.nim +++ b/tests/node/test_wakunode_relay_rln.nim @@ -263,7 +263,9 @@ suite "Waku RlnRelay - End to End - Static": completionFut.complete((topic, msg)) let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic) - server.subscribe(subscriptionEvent, some(relayHandler)) + server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic" + await sleepAsync(FUTURE_TIMEOUT) # Generate Messages @@ -357,7 +359,9 @@ suite "Waku RlnRelay - End to End - Static": completionFut.complete((topic, msg)) let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic) - server.subscribe(subscriptionEvent, some(relayHandler)) + server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic" + await sleepAsync(FUTURE_TIMEOUT) # Generate Messages diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index d79c6b991..1c2805710 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -282,8 +282,10 @@ procSuite "Peer Manager": await node1.start() await node2.start() - await node1.mountRelay() - await node2.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let peerInfo2 = node2.switch.peerInfo var remotePeerInfo2 = peerInfo2.toRemotePeerInfo() @@ -323,7 +325,8 @@ procSuite "Peer Manager": node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected - await node3.mountRelay() + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node3.peerManager.connectToRelayPeers() @@ -352,8 +355,10 @@ procSuite "Peer Manager": await node1.start() await node2.start() - await node1.mountRelay() - await node2.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let peerInfo2 = node2.switch.peerInfo var remotePeerInfo2 = peerInfo2.toRemotePeerInfo() @@ -393,7 +398,8 @@ procSuite "Peer Manager": node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected - await node3.mountRelay() + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node3.peerManager.manageRelayPeers() @@ -482,9 +488,11 @@ procSuite "Peer Manager": await node1.start() await node2.start() - await node1.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" node1.wakuRelay.codec = betaCodec - await node2.mountRelay() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" node2.wakuRelay.codec = betaCodec require: @@ -506,7 +514,8 @@ procSuite "Peer Manager": peerStorage = storage, ) - await node3.mountRelay() + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" node3.wakuRelay.codec = stableCodec check: # Node 2 and 3 have differing codecs diff --git a/tests/test_relay_peer_exchange.nim 
b/tests/test_relay_peer_exchange.nim index e950cb015..a729ff1a7 100644 --- a/tests/test_relay_peer_exchange.nim +++ b/tests/test_relay_peer_exchange.nim @@ -23,8 +23,10 @@ procSuite "Relay (GossipSub) Peer Exchange": newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true) # When both client and server mount relay without a handler - await node1.mountRelay(@[DefaultRelayShard]) - await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler)) + (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr: + assert false, "Failed to mount relay" # Then the relays are mounted without a handler check: @@ -73,9 +75,12 @@ procSuite "Relay (GossipSub) Peer Exchange": peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler # Givem the nodes mount relay with a peer exchange handler - await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle)) - await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle)) - await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle)) + (await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" + (await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" # Ensure that node1 prunes all peers after the first connection node1.wakuRelay.parameters.dHigh = 1 diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim index fe29627d4..7028b20eb 100644 --- a/tests/test_waku_dnsdisc.nim +++ b/tests/test_waku_dnsdisc.nim @@ -37,9 +37,12 @@ suite "Waku DNS Discovery": node3 = newTestWakuNode(nodeKey3, bindIp, Port(63503)) enr3 = node3.enr - await node1.mountRelay() - await node2.mountRelay() - await node3.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await allFutures([node1.start(), node2.start(), node3.start()]) # Build and sign tree @@ -75,7 +78,8 @@ suite "Waku DNS Discovery": nodeKey4 = generateSecp256k1Key() node4 = newTestWakuNode(nodeKey4, bindIp, Port(63504)) - await node4.mountRelay() + (await node4.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node4.start() var wakuDnsDisc = WakuDnsDiscovery.init(location, resolver).get() diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim index c961773e5..d4d05ad97 100644 --- a/tests/test_waku_keepalive.nim +++ b/tests/test_waku_keepalive.nim @@ -31,11 +31,13 @@ suite "Waku Keepalive": completionFut.complete(true) await node1.start() - await node1.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node1.mountLibp2pPing() await node2.start() - await node2.mountRelay() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let pingProto = Ping.new(handler = pingHandler) await pingProto.start() diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim index df4b442d6..51dd999b0 100644 --- a/tests/test_wakunode.nim +++ b/tests/test_wakunode.nim @@ -34,13 +34,15 @@ suite "WakuNode": # Setup node 1 with stable codec "/vac/waku/relay/2.0.0" await node1.start() - await 
node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" node1.wakuRelay.codec = "/vac/waku/relay/2.0.0" # Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2" check: @@ -61,7 +63,14 @@ suite "WakuNode": msg.payload == payload completionFut.complete(true) - node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node2.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic" await sleepAsync(2000.millis) var res = await node1.publish(some($shard), message) @@ -92,8 +101,10 @@ suite "WakuNode": node2PeerId = $(node2.switch.peerInfo.peerId) node2Dns4Addr = "/dns4/localhost/tcp/61022/p2p/" & node2PeerId - await node1.mountRelay() - await node2.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await allFutures([node1.start(), node2.start()]) @@ -117,7 +128,8 @@ suite "WakuNode": # Initialize and start node1 await node1.start() - await node1.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" # Create an array to hold the other nodes var otherNodes: seq[WakuNode] = @[] @@ -129,7 +141,8 @@ suite "WakuNode": port = 60012 + i * 2 # Ensure unique ports for each node node = newTestWakuNode(nodeKey, parseIpAddress("127.0.0.1"), Port(port)) await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" otherNodes.add(node) # Connect all other nodes to node1 @@ -296,10 +309,12 @@ suite "WakuNode": node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61016)) await node1.start() - await node1.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) @@ -337,10 +352,12 @@ suite "WakuNode": node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61020)) await node1.start() - await node1.mountRelay() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim index 8e028acdc..5d5ce8458 100644 --- a/tests/waku_relay/test_wakunode_relay.nim +++ b/tests/waku_relay/test_wakunode_relay.nim @@ -30,7 +30,8 @@ suite "WakuNode - Relay": # Relay protocol starts if mounted after node start await node1.start() - await node1.mountRelay() + (await 
node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" check: GossipSub(node1.wakuRelay).heartbeatFut.isNil() == false @@ -41,7 +42,8 @@ suite "WakuNode - Relay": nodeKey2 = generateSecp256k1Key() node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) - await node2.mountRelay() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" check: # Relay has not yet started as node has not yet started @@ -69,13 +71,16 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node3.start() - await node3.mountRelay(@[shard]) + (await node3.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await allFutures( node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]), @@ -93,7 +98,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) var res = await node1.publish(some($shard), message) @@ -136,13 +148,16 @@ suite "WakuNode - Relay": # start all the nodes await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node3.start() - await node3.mountRelay(@[shard]) + (await node3.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -179,7 +194,14 @@ suite "WakuNode - Relay": # relay handler is called completionFut.complete(true) - node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) var res = await node1.publish(some($shard), message1) @@ -221,7 +243,8 @@ suite "WakuNode - Relay": connOk == true # Node 1 subscribes to topic - nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) # Node 0 publishes 5 messages not compliant with WakuMessage (aka random bytes) @@ -265,10 +288,12 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -283,7 +308,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) @@ -314,10 +346,12 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -332,7 +366,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) @@ -363,10 +404,12 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" #delete websocket peer address # TODO: a better way to find the index - this is too brittle @@ -385,7 +428,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) @@ -418,10 +468,12 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -436,7 +488,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) @@ -477,10 +536,12 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - await node1.mountRelay(@[shard]) + (await node1.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node2.start() - await node2.mountRelay(@[shard]) + (await node2.mountRelay(@[shard])).isOkOr: + assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -495,7 +556,14 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) @@ -564,14 +632,15 @@ suite "WakuNode - Relay": # Stop all nodes await allFutures(nodes.mapIt(it.stop())) - asyncTest "Unsubscribe keep the subscription if other content topics also use the shard": + asyncTest "Only one subscription is allowed for contenttopics that generate the same shard": ## Setup let nodeKey = generateSecp256k1Key() node = newTestWakuNode(nodeKey, parseIpAddress("0.0.0.0"), Port(0)) await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" require node.mountSharding(1, 1).isOk ## Given @@ -593,19 +662,19 @@ suite "WakuNode - Relay": "topic must use the same shard" ## When - node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)) - node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)) - node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)) + node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isErrOr: + assert false, + "The subscription should fail because is already subscribe to that shard" + node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isErrOr: + assert false, + "The subscription should fail because is already subscribe to that shard" ## Then - node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)) + node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr: + assert false, "Failed to unsubscribe to topic: " & $error check node.wakuRelay.isSubscribed(shard) - node.unsubscribe((kind: ContentUnsub, topic: contentTopicA)) - check node.wakuRelay.isSubscribed(shard) - - node.unsubscribe((kind: ContentUnsub, topic: contentTopicC)) - check not node.wakuRelay.isSubscribed(shard) - ## Cleanup await node.stop() diff --git a/tests/waku_relay/utils.nim 
b/tests/waku_relay/utils.nim index 3e39294a1..821881f4c 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -5,6 +5,7 @@ import stew/byteutils, stew/shims/net as stewNet, chronos, + chronicles, libp2p/switch, libp2p/protocols/pubsub/pubsub @@ -50,12 +51,6 @@ proc setupRln*(node: WakuNode, identifier: uint) {.async.} = ) ) -proc setupRelayWithRln*( - node: WakuNode, identifier: uint, shards: seq[RelayShard] -) {.async.} = - await node.mountRelay(shards) - await setupRln(node, identifier) - proc subscribeToContentTopicWithHandler*( node: WakuNode, contentTopic: string ): Future[bool] = @@ -66,7 +61,9 @@ proc subscribeToContentTopicWithHandler*( if topic == topic: completionFut.complete(true) - node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler)) + (node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler))).isOkOr: + error "Failed to subscribe to content topic", error + completionFut.complete(true) return completionFut proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bool] = @@ -77,7 +74,9 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo if topic == pubsubTopic: completionFut.complete(true) - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler)) + (node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))).isOkOr: + error "Failed to subscribe to pubsub topic", error + completionFut.complete(false) return completionFut proc sendRlnMessage*( diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index 2a0fd5779..bd8edfcd3 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -58,7 +58,8 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - await node1.mountRelay(@[DefaultRelayShard]) + (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( @@ -74,7 +75,8 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - await node2.mountRelay(@[DefaultRelayShard]) + (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( rlnRelayDynamic: false, @@ -89,7 +91,8 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - await node3.mountRelay(@[DefaultRelayShard]) + (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( rlnRelayDynamic: false, @@ -115,8 +118,14 @@ procSuite "WakuNode - RLN relay": if topic == DefaultPubsubTopic: completionFut.complete(true) - # mount the relay handler - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) # prepare the message payload @@ -126,6 +135,11 @@ procSuite "WakuNode - RLN relay": var message = WakuMessage(payload: @payload, contentTopic: contentTopic) doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk()) + debug "Nodes participating in the test", + node1 = shortLog(node1.switch.peerInfo.peerId), + node2 = shortLog(node2.switch.peerInfo.peerId), + node3 = shortLog(node3.switch.peerInfo.peerId) + ## node1 publishes a message with a rate limit proof, the message is then relayed to node2 which in turn ## verifies the rate limit proof of the message and relays the message to node3 ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc @@ -187,9 +201,18 @@ procSuite "WakuNode - RLN relay": elif topic == $shards[1]: rxMessagesTopic2 = rxMessagesTopic2 + 1 + ## This unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[0])).isOkOr: + assert false, "Failed to unsubscribe to pubsub topic: " & $error + nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[1])).isOkOr: + assert false, "Failed to unsubscribe to pubsub topic: " & $error + # mount the relay handlers - nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler)) - nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler)) + nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(1000.millis) # generate some messages with rln proofs first. generating @@ -250,7 +273,8 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - await node1.mountRelay(@[DefaultRelayShard]) + (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( @@ -266,7 +290,8 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - await node2.mountRelay(@[DefaultRelayShard]) + (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( rlnRelayDynamic: false, @@ -281,7 +306,8 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - await node3.mountRelay(@[DefaultRelayShard]) + (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( rlnRelayDynamic: false, @@ -307,8 +333,14 @@ procSuite "WakuNode - RLN relay": if topic == DefaultPubsubTopic: completionFut.complete(true) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to unsubscribe to pubsub topic: " & $error + # mount the relay handler - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) # prepare the message payload @@ -366,7 +398,8 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - await node1.mountRelay(@[DefaultRelayShard]) + (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( @@ -382,7 +415,8 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - await node2.mountRelay(@[DefaultRelayShard]) + (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( @@ -397,7 +431,8 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - await node3.mountRelay(@[DefaultRelayShard]) + (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig3 = WakuRlnConfig( @@ -456,8 +491,14 @@ procSuite "WakuNode - RLN relay": if msg.payload == wm4.payload: completionFut4.complete(true) + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to unsubscribe to pubsub topic: " & $error + # mount the relay handler for node3 - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) ## node1 publishes and relays 4 messages to node2 @@ -500,12 +541,15 @@ procSuite "WakuNode - RLN relay": epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4 # Given both nodes mount relay and rlnrelay - await node1.mountRelay(shardSeq) + (await node1.mountRelay(shardSeq)).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10") - await node1.mountRlnRelay(wakuRlnConfig1) + (await node1.mountRlnRelay(wakuRlnConfig1)).isOkOr: + assert false, "Failed to mount rlnrelay" # Mount rlnrelay in node2 in off-chain mode - await node2.mountRelay(@[DefaultRelayShard]) + (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11") await node2.mountRlnRelay(wakuRlnConfig2) @@ -548,7 +592,8 @@ procSuite "WakuNode - RLN relay": if msg == wm6: completionFut6.complete(true) - node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)) + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error # Given all messages have an rln proof and are published by the node 1 let publishSleepDuration: Duration = 5000.millis @@ -638,12 +683,14 @@ procSuite "WakuNode - RLN relay": # Given both nodes mount relay and rlnrelay # Mount rlnrelay in node1 in off-chain mode - await node1.mountRelay(shardSeq) + (await 
node1.mountRelay(shardSeq)).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10") await node1.mountRlnRelay(wakuRlnConfig1) # Mount rlnrelay in node2 in off-chain mode - await node2.mountRelay(@[DefaultRelayShard]) + (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11") await node2.mountRlnRelay(wakuRlnConfig2) diff --git a/tests/waku_rln_relay/utils_static.nim b/tests/waku_rln_relay/utils_static.nim index d2a781fcd..719ce465c 100644 --- a/tests/waku_rln_relay/utils_static.nim +++ b/tests/waku_rln_relay/utils_static.nim @@ -5,6 +5,7 @@ import stew/byteutils, stew/shims/net as stewNet, chronos, + chronicles, libp2p/switch, libp2p/protocols/pubsub/pubsub @@ -45,7 +46,10 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo if topic == pubsubTopic: completionFut.complete(true) - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler)) + node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler)).isOkOr: + error "failed to subscribe to relay", topic = pubsubTopic, error = error + completionFut.complete(false) + return completionFut proc sendRlnMessage*( diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim index fc32440d7..7d29711b1 100644 --- a/tests/wakunode_rest/test_rest_cors.nim +++ b/tests/wakunode_rest/test_rest_cors.nim @@ -102,7 +102,8 @@ suite "Waku v2 REST API CORS Handling": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -155,7 +156,8 @@ suite "Waku v2 REST API CORS Handling": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -211,7 +213,8 @@ suite "Waku v2 REST API CORS Handling": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -258,7 +261,8 @@ suite "Waku v2 REST API CORS Handling": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") diff --git a/tests/wakunode_rest/test_rest_debug.nim b/tests/wakunode_rest/test_rest_debug.nim index f4e66eb20..3129b3544 100644 --- a/tests/wakunode_rest/test_rest_debug.nim +++ b/tests/wakunode_rest/test_rest_debug.nim @@ -37,7 +37,8 @@ suite "Waku v2 REST API - Debug": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -66,7 +67,8 @@ suite "Waku v2 REST API - Debug": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim index 358872769..556b6b52e 100644 --- 
a/tests/wakunode_rest/test_rest_filter.nim +++ b/tests/wakunode_rest/test_rest_filter.nim @@ -54,7 +54,9 @@ proc init(T: type RestFilterTest): Future[T] {.async.} = await allFutures(testSetup.serviceNode.start(), testSetup.subscriberNode.start()) - await testSetup.serviceNode.mountRelay() + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error + await testSetup.serviceNode.mountFilter(messageCacheTTL = 1.seconds) await testSetup.subscriberNode.mountFilterClient() @@ -278,7 +280,8 @@ suite "Waku v2 Rest API - Filter V2": subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic) - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error # When var requestBody = FilterSubscribeRequest( @@ -323,7 +326,8 @@ suite "Waku v2 Rest API - Filter V2": # setup filter service and client node let restFilterTest = await RestFilterTest.init() let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error let requestBody = FilterSubscribeRequest( requestId: "1001", @@ -394,7 +398,8 @@ suite "Waku v2 Rest API - Filter V2": # setup filter service and client node let restFilterTest = await RestFilterTest.init() let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error let requestBody = FilterSubscribeRequest( requestId: "1001", diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index 7d842a3eb..ac2fd9eac 100644 --- a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -42,7 +42,8 @@ suite "Waku v2 REST API - health": let node = testWakuNode() let healthMonitor = WakuNodeHealthMonitor() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" healthMonitor.setOverallHealth(HealthStatus.INITIALIZING) diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim index 2c4ec0959..72e309a13 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -58,8 +58,10 @@ proc init( testSetup.consumerNode.start(), ) - await testSetup.consumerNode.mountRelay() - await testSetup.serviceNode.mountRelay() + (await testSetup.consumerNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error await testSetup.serviceNode.mountLightPush(rateLimit) testSetup.pushNode.mountLightPushClient() @@ -129,10 +131,13 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.consumerNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, 
topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 @@ -161,7 +166,8 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 @@ -218,10 +224,13 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.consumerNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim index 8176aed7a..e1d6dca30 100644 --- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -58,8 +58,10 @@ proc init( testSetup.consumerNode.start(), ) - await testSetup.consumerNode.mountRelay() - await testSetup.serviceNode.mountRelay() + (await testSetup.consumerNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" await testSetup.serviceNode.mountLegacyLightPush(rateLimit) testSetup.pushNode.mountLegacyLightPushClient() @@ -124,10 +126,13 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.consumerNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to topic" + restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to topic" require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 @@ -156,7 +161,8 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to topic" require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 @@ -216,10 +222,13 @@ suite "Waku v2 Rest API - lightpush": restLightPushTest.consumerNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to topic" + restLightPushTest.serviceNode.subscribe( (kind: PubsubSub, topic: DefaultPubsubTopic) - ) + ).isOkOr: + assert false, "Failed to subscribe to topic" require: toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index 9732d114b..acfa05bab 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -41,7 +41,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -95,7 +96,8 @@ suite "Waku v2 Rest API - Relay": shard3 = RelayShard(clusterId: DefaultClusterId, shardId: 3) shard4 = RelayShard(clusterId: DefaultClusterId, shardId: 4) - await node.mountRelay(@[shard0, shard1, shard2, 
shard3]) + (await node.mountRelay(@[shard0, shard1, shard2, shard3, shard4])).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -144,7 +146,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -220,7 +223,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), @@ -245,7 +249,8 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to pubsub topic" require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -275,7 +280,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" require node.mountSharding(1, 8).isOk var restPort = Port(0) @@ -324,11 +330,13 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restServer.start() restPort = restServer.httpServer.address.port # update with bound port for client use @@ -347,11 +355,18 @@ suite "Waku v2 Rest API - Relay": cache.contentSubscribe("/waku/2/default-contentY/proto") installRelayApiHandlers(restServer.router, node, cache) - restServer.start() # When let client = newRestHttpClient(initTAddress(restAddress, restPort)) - let response = await client.relayDeleteAutoSubscriptionsV1(contentTopics) + + var response = await client.relayPostAutoSubscriptionsV1(contentTopics) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + response = await client.relayDeleteAutoSubscriptionsV1(contentTopics) # Then check: @@ -373,7 +388,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -437,7 +453,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), @@ -461,7 +478,8 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: ContentSub, topic: DefaultContentTopic)) + node.subscribe((kind: ContentSub, topic: DefaultContentTopic)).isOkOr: + assert false, "Failed to subscribe to content topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -489,7 +507,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await 
node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), @@ -539,7 +558,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), @@ -564,7 +584,8 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -594,7 +615,8 @@ suite "Waku v2 Rest API - Relay": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( rlnRelayDynamic: false, rlnRelayCredIndex: some(1.uint), @@ -619,7 +641,8 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)) + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index c31e3939c..d0631bfbf 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -86,7 +86,8 @@ procSuite "Waku Rest API - Store v3": asyncTest "invalid cursor": let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -165,7 +166,8 @@ procSuite "Waku Rest API - Store v3": asyncTest "Filter by start and end time": let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -330,7 +332,8 @@ procSuite "Waku Rest API - Store v3": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -403,7 +406,8 @@ procSuite "Waku Rest API - Store v3": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -492,7 +496,8 @@ procSuite "Waku Rest API - Store v3": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -548,7 +553,8 @@ procSuite "Waku Rest API - Store v3": # Given let node = testWakuNode() await node.start() - await node.mountRelay() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") diff --git 
a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 3142ff766..78093c6cd 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -301,12 +301,12 @@ proc setupProtocols( debug "Setting max message size", num_bytes = parsedMaxMsgSize - try: + ( await mountRelay( node, shards, peerExchangeHandler = peerExchangeHandler, int(parsedMaxMsgSize) ) - except CatchableError: - return err("failed to mount waku relay protocol: " & getCurrentExceptionMsg()) + ).isOkOr: + return err("failed to mount waku relay protocol: " & $error) # Add validation keys to protected topics var subscribedProtectedShards: seq[ProtectedShard] diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index ce86c3c57..bb8b6f9c3 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -256,7 +256,7 @@ proc mountStoreSync*( ## Waku relay -proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) = +proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = if node.wakuRelay.isSubscribed(topic): return @@ -301,30 +301,34 @@ proc registerRelayDefaultHandler*(node: WakuNode, topic: PubsubTopic) = proc subscribe*( node: WakuNode, subscription: SubscriptionEvent, handler = none(WakuRelayHandler) -) = +): Result[void, string] = ## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on ## this topic. WakuRelayHandler is a method that takes a topic and a Waku message. if node.wakuRelay.isNil(): error "Invalid API call to `subscribe`. WakuRelay not mounted." - return + return err("Invalid API call to `subscribe`. WakuRelay not mounted.") let (pubsubTopic, contentTopicOp) = case subscription.kind of ContentSub: let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: error "Autosharding error", error = error - return + return err("Autosharding error: " & error) ($shard, some(subscription.topic)) of PubsubSub: (subscription.topic, none(ContentTopic)) else: - return + return err("Unsupported subscription type in relay subscribe") + + if node.wakuRelay.isSubscribed(pubsubTopic): + debug "already subscribed to topic", pubsubTopic + return err("Already subscribed to topic: " & $pubsubTopic) if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()): error "Invalid API call to `subscribe`. Was already subscribed" - return + return err("Invalid API call to `subscribe`. Was already subscribed") node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) node.registerRelayDefaultHandler(pubsubTopic) @@ -335,43 +339,49 @@ proc subscribe*( if contentTopicOp.isSome(): node.contentTopicHandlers[contentTopicOp.get()] = wrappedHandler -proc unsubscribe*(node: WakuNode, subscription: SubscriptionEvent) = + return ok() + +proc unsubscribe*( + node: WakuNode, subscription: SubscriptionEvent +): Result[void, string] = ## Unsubscribes from a specific PubSub or Content topic. if node.wakuRelay.isNil(): error "Invalid API call to `unsubscribe`. WakuRelay not mounted." - return + return err("Invalid API call to `unsubscribe`. 
WakuRelay not mounted.") let (pubsubTopic, contentTopicOp) = case subscription.kind of ContentUnsub: let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: error "Autosharding error", error = error - return + return err("Autosharding error: " & error) ($shard, some(subscription.topic)) of PubsubUnsub: (subscription.topic, none(ContentTopic)) else: - return + return err("Unsupported subscription type in relay unsubscribe") if not node.wakuRelay.isSubscribed(pubsubTopic): - error "Invalid API call to `unsubscribe`. Was not subscribed" + error "Invalid API call to `unsubscribe`. Was not subscribed", pubsubTopic return + err("Invalid API call to `unsubscribe`. Was not subscribed to: " & $pubsubTopic) if contentTopicOp.isSome(): # Remove this handler only var handler: TopicHandler + ## TODO: refactor this part. I think we can simplify it if node.contentTopicHandlers.pop(contentTopicOp.get(), handler): debug "unsubscribe", contentTopic = contentTopicOp.get() - node.wakuRelay.unsubscribe(pubsubTopic, handler) - - if contentTopicOp.isNone() or node.wakuRelay.topics.getOrDefault(pubsubTopic).len == 1: - # Remove all handlers + node.wakuRelay.unsubscribe(pubsubTopic) + else: debug "unsubscribe", pubsubTopic = pubsubTopic - node.wakuRelay.unsubscribeAll(pubsubTopic) + node.wakuRelay.unsubscribe(pubsubTopic) node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) + return ok() + proc publish*( node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage ): Future[Result[void, string]] {.async, gcsafe.} = @@ -433,20 +443,17 @@ proc mountRelay*( shards: seq[RelayShard] = @[], peerExchangeHandler = none(RoutingRecordsHandler), maxMessageSize = int(DefaultMaxWakuMessageSize), -) {.async, gcsafe.} = +): Future[Result[void, string]] {.async.} = if not node.wakuRelay.isNil(): error "wakuRelay already mounted, skipping" - return + return err("wakuRelay already mounted, skipping") ## The default relay topics is the union of all configured topics plus default PubsubTopic(s) info "mounting relay protocol" - let initRes = WakuRelay.new(node.switch, maxMessageSize) - if initRes.isErr(): - error "failed mounting relay protocol", error = initRes.error - return - - node.wakuRelay = initRes.value + node.wakuRelay = WakuRelay.new(node.switch, maxMessageSize).valueOr: + error "failed mounting relay protocol", error = error + return err("failed mounting relay protocol: " & error) ## Add peer exchange handler if peerExchangeHandler.isSome(): @@ -459,11 +466,17 @@ proc mountRelay*( node.switch.mount(node.wakuRelay, protocolMatcher(WakuRelayCodec)) - info "relay mounted successfully", shards = shards + ## Make sure we don't have duplicates + let uniqueShards = deduplicate(shards) # Subscribe to shards - for shard in shards: - node.subscribe((kind: PubsubSub, topic: $shard)) + for shard in uniqueShards: + node.subscribe((kind: PubsubSub, topic: $shard)).isOkOr: + error "failed to subscribe to shard", error = error + return err("failed to subscribe to shard in mountRelay: " & error) + + info "relay mounted successfully", shards = uniqueShards + return ok() ## Waku filter @@ -1218,6 +1231,7 @@ proc mountRlnRelay*( raise newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error) let rlnRelay = rlnRelayRes.get() + if (rlnConf.rlnRelayUserMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit): error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract" let validator = generateRlnValidator(rlnRelay, spamHandler) diff --git 
a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim index cb324075a..747835fc8 100644 --- a/waku/waku_api/rest/builder.nim +++ b/waku/waku_api/rest/builder.nim @@ -18,7 +18,8 @@ import waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api, waku/waku_api/rest/health/handlers as rest_health_api, waku/waku_api/rest/admin/handlers as rest_admin_api, - waku/waku_core/topics + waku/waku_core/topics, + waku/waku_relay/protocol ## Monitoring and external interfaces @@ -129,18 +130,31 @@ proc startRestServerProtocolSupport*( ## Relay REST API if conf.relay: + ## This MessageCache is used, f.e., in js-waku<>nwaku interop tests. + ## js-waku tests asks nwaku-docker through REST whether a message is properly received. let cache = MessageCache.init(int(conf.restRelayCacheCapacity)) - let handler = messageCacheHandler(cache) + let handler: WakuRelayHandler = messageCacheHandler(cache) for shard in conf.shards: let pubsubTopic = $RelayShard(clusterId: conf.clusterId, shardId: shard) cache.pubsubSubscribe(pubsubTopic) - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(handler)) + + ## TODO: remove this line. use observer-observable pattern + ## within waku_node::registerRelayDefaultHandler + discard node.wakuRelay.subscribe(pubsubTopic, handler) for contentTopic in conf.contentTopics: cache.contentSubscribe(contentTopic) - node.subscribe((kind: ContentSub, topic: contentTopic), some(handler)) + + let shard = node.wakuSharding.getShard(contentTopic).valueOr: + error "Autosharding error in REST", error = error + continue + let pubsubTopic = $shard + + ## TODO: remove this line. use observer-observable pattern + ## within waku_node::registerRelayDefaultHandler + discard node.wakuRelay.subscribe(pubsubTopic, handler) installRelayApiHandlers(router, node, cache) else: diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim index 7ee0ee7e3..252375208 100644 --- a/waku/waku_api/rest/relay/handlers.nim +++ b/waku/waku_api/rest/relay/handlers.nim @@ -66,9 +66,13 @@ proc installRelayApiHandlers*( for pubsubTopic in newTopics: cache.pubsubSubscribe(pubsubTopic) + node.subscribe( (kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache)) - ) + ).isOkOr: + let errorMsg = "Subscribe failed:" & $error + error "SUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) return RestApiResponse.ok() @@ -88,7 +92,10 @@ proc installRelayApiHandlers*( # Unsubscribe all handlers from requested topics for pubsubTopic in req: cache.pubsubUnsubscribe(pubsubTopic) - node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)) + node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)).isOkOr: + let errorMsg = "Unsubscribe failed:" & $error + error "UNSUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) # Successfully unsubscribed from all requested topics return RestApiResponse.ok() @@ -193,9 +200,13 @@ proc installRelayApiHandlers*( for contentTopic in newTopics: cache.contentSubscribe(contentTopic) + node.subscribe( (kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache)) - ) + ).isOkOr: + let errorMsg = "Subscribe failed:" & $error + error "SUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) return RestApiResponse.ok() @@ -211,7 +222,10 @@ proc installRelayApiHandlers*( for contentTopic in req: cache.contentUnsubscribe(contentTopic) - node.unsubscribe((kind: ContentUnsub, topic: contentTopic)) + node.unsubscribe((kind: 
ContentUnsub, topic: contentTopic)).isOkOr: + let errorMsg = "Unsubscribe failed:" & $error + error "UNSUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) return RestApiResponse.ok() diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index 4eeaf4607..daaf056b7 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -129,7 +129,8 @@ type # the second entry contains the error messages to be returned when the validator fails wakuValidators: seq[tuple[handler: WakuValidatorHandler, errorMessage: string]] # a map of validators to error messages to return when validation fails - validatorInserted: Table[PubsubTopic, bool] + topicValidator: Table[PubsubTopic, ValidatorHandler] + # map topic with its assigned validator within pubsub publishObservers: seq[PublishObserver] topicsHealth*: Table[string, TopicHealth] onTopicHealthChange*: TopicHealthChangeHandler @@ -427,7 +428,7 @@ proc isSubscribed*(w: WakuRelay, topic: PubsubTopic): bool = proc subscribedTopics*(w: WakuRelay): seq[PubsubTopic] = return toSeq(GossipSub(w).topics.keys()) -proc generateOrderedValidator(w: WakuRelay): auto {.gcsafe.} = +proc generateOrderedValidator(w: WakuRelay): ValidatorHandler {.gcsafe.} = # rejects messages that are not WakuMessage let wrappedValidator = proc( pubsubTopic: string, message: messages.Message @@ -516,9 +517,10 @@ proc subscribe*( # Add the ordered validator to the topic # This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator. # Otherwise this might lead to unintended behaviour. - if not w.validatorInserted.hasKey(pubSubTopic): + if not w.topicValidator.hasKey(pubSubTopic): + let newValidator = w.generateOrderedValidator() procCall GossipSub(w).addValidator(pubSubTopic, w.generateOrderedValidator()) - w.validatorInserted[pubSubTopic] = true + w.topicValidator[pubSubTopic] = newValidator # set this topic parameters for scoring w.topicParams[pubsubTopic] = TopicParameters @@ -534,14 +536,36 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = debug "unsubscribe all", pubsubTopic = pubsubTopic procCall GossipSub(w).unsubscribeAll(pubsubTopic) - w.validatorInserted.del(pubsubTopic) + w.topicValidator.del(pubsubTopic) -proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: TopicHandler) = - ## Unsubscribe this handler on this pubsub topic +proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) = + if not w.topicValidator.hasKey(pubsubTopic): + error "unsubscribe no validator for this topic", pubsubTopic + return - debug "unsubscribe", pubsubTopic = pubsubTopic + if pubsubtopic notin Pubsub(w).topics: + error "not subscribed to the given topic", pubsubTopic + return - procCall GossipSub(w).unsubscribe(pubsubTopic, handler) + var topicHandlerSeq: seq[TopicHandler] + var topicValidator: ValidatorHandler + try: + topicHandlerSeq = Pubsub(w).topics[pubsubTopic] + if topicHandlerSeq.len == 0: + error "unsubscribe no handler for this topic", pubsubTopic + return + topicValidator = w.topicValidator[pubsubTopic] + except KeyError: + error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg() + return + + let topicHandler = topicHandlerSeq[0] + + debug "unsubscribe", pubsubTopic + procCall GossipSub(w).unsubscribe($pubsubTopic, topicHandler) + ## TODO: uncomment the following line when https://github.com/vacp2p/nim-libp2p/pull/1356 + ## is available in a nim-libp2p release. 
+ # procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator) proc publish*( w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage @@ -624,7 +648,4 @@ proc getNumConnectedPeers*( proc getSubscribedTopics*(w: WakuRelay): seq[PubsubTopic] = ## Returns a seq containing the current list of subscribed topics - var topics: seq[PubsubTopic] - for t in w.validatorInserted.keys(): - topics.add(t) - return topics + return PubSub(w).topics.keys.toSeq().mapIt(cast[PubsubTopic](it)) From cc66c7fe782ad60e273f5bfb29e850f8a9181d37 Mon Sep 17 00:00:00 2001 From: fryorcraken <110212804+fryorcraken@users.noreply.github.com> Date: Thu, 8 May 2025 07:05:35 +1000 Subject: [PATCH 38/48] chore!: separate internal and CLI configurations (#3357) Split `WakuNodeConfig` object for better separation of concerns and to introduce a tree-like structure to configuration. * fix: ensure twn cluster conf is still applied when clusterId=1 * test: remove usage of `WakuNodeConf` * Remove macro, split builder files, remove wakunodeconf from tests * rm network_conf_builder module as it is not used --------- Co-authored-by: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Co-authored-by: Ivan Folgueira Bande --- apps/chat2/chat2.nim | 21 +- apps/chat2/config_chat2.nim | 13 + .../liteprotocoltester/liteprotocoltester.nim | 2 +- apps/networkmonitor/networkmonitor.nim | 17 +- apps/wakucanary/wakucanary.nim | 2 +- apps/wakunode2/wakunode2.nim | 57 +- examples/wakustealthcommitments/node_spec.nim | 1 - .../requests/node_lifecycle_request.nim | 6 +- tests/all_tests_waku.nim | 2 +- tests/factory/test_config.nim | 157 ----- tests/factory/test_external_config.nim | 208 ++++++ tests/factory/test_node_factory.nim | 21 +- tests/factory/test_waku_conf.nim | 272 ++++++++ tests/node/test_wakunode_legacy_lightpush.nim | 10 +- tests/node/test_wakunode_lightpush.nim | 10 +- tests/node/test_wakunode_relay_rln.nim | 29 +- tests/test_waku_netconfig.nim | 150 ++-- tests/testlib/wakucore.nim | 1 - tests/testlib/wakunode.nim | 58 +- tests/waku_discv5/test_waku_discv5.nim | 104 ++- tests/waku_relay/utils.nim | 8 +- tests/waku_rln_relay/test_waku_rln_relay.nim | 40 +- .../test_wakunode_rln_relay.nim | 110 +-- tests/waku_rln_relay/utils_static.nim | 8 +- tests/wakunode2/test_app.nim | 23 +- tests/wakunode_rest/test_rest_health.nim | 8 +- tests/wakunode_rest/test_rest_relay.nim | 50 +- tools/rln_db_inspector/rln_db_inspector.nim | 8 +- .../rln_keystore_generator.nim | 38 +- waku/common/utils/nat.nim | 3 +- waku/discovery/waku_discv5.nim | 35 +- waku/factory/builder.nim | 2 +- waku/factory/conf_builder/conf_builder.nim | 17 + .../conf_builder/discv5_conf_builder.nim | 65 ++ .../dns_discovery_conf_builder.nim | 38 + .../filter_service_conf_builder.nim | 45 ++ .../metrics_server_conf_builder.nim | 47 ++ .../conf_builder/rest_server_conf_builder.nim | 64 ++ .../conf_builder/rln_relay_conf_builder.nim | 104 +++ .../store_service_conf_builder.nim | 74 ++ .../conf_builder/store_sync_conf_builder.nim | 51 ++ .../conf_builder/waku_conf_builder.nim | 649 ++++++++++++++++++ .../conf_builder/web_socket_conf_builder.nim | 68 ++ waku/factory/external_config.nim | 251 +++++-- waku/factory/internal_config.nim | 165 ++--- waku/factory/network_conf.nim | 34 + waku/factory/networks_config.nim | 8 +- waku/factory/node_factory.nim | 170 ++--- waku/factory/validator_signed.nim | 2 +- waku/factory/waku.nim | 160 ++--- waku/factory/waku_conf.nim | 249 +++++++ waku/node/{config.nim => net_config.nim} | 20 +- waku/node/waku_metrics.nim | 22 +- 
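For orientation, the new tests added by this patch (tests/factory/test_waku_conf.nim and tests/factory/test_node_factory.nim) show how the split configuration is meant to be assembled: a WakuConfBuilder with nested sub-builders (discv5Conf, rlnRelayConf, storeServiceConf, ...) produces a WakuConf that is then validated. A minimal sketch follows, assuming the module paths and builder procs exactly as they appear in those tests; the proc name buildExampleConf and the concrete values are illustrative only, not part of the patch.

    import results
    import
      waku/factory/networks_config,
      waku/factory/waku_conf,
      waku/factory/conf_builder/conf_builder

    proc buildExampleConf(): Result[WakuConf, string] =
      ## Start from The Waku Network preset, then override a few
      ## fields through the nested sub-builders (tree-like config).
      var builder = WakuConfBuilder.init()
      builder.withClusterConf(ClusterConf.TheWakuNetworkConf())
      builder.withRelay(true)
      builder.withShards(@[2.uint16, 3.uint16])
      builder.discv5Conf.withUdpPort(9000)
      builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
      builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")

      # build() assembles the WakuConf; validate() rejects inconsistent
      # settings (e.g. shards outside the configured shard range).
      let conf = builder.build().valueOr:
        return err("config build failed: " & $error)
      conf.validate().isOkOr:
        return err("config validation failed: " & $error)
      return ok(conf)

The CLI path in apps/wakunode2/wakunode2.nim follows the same flow: WakuNodeConf.toWakuConf() produces the internal WakuConf, which is then handed to Waku.new.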
waku/node/waku_node.nim | 11 +- waku/utils/collector.nim | 10 + waku/waku_api/rest/builder.nim | 50 +- waku/waku_api/rest/server.nim | 2 +- waku/waku_node.nim | 4 +- waku/waku_rln_relay/rln_relay.nim | 63 +- 59 files changed, 2984 insertions(+), 933 deletions(-) delete mode 100644 tests/factory/test_config.nim create mode 100644 tests/factory/test_external_config.nim create mode 100644 tests/factory/test_waku_conf.nim create mode 100644 waku/factory/conf_builder/conf_builder.nim create mode 100644 waku/factory/conf_builder/discv5_conf_builder.nim create mode 100644 waku/factory/conf_builder/dns_discovery_conf_builder.nim create mode 100644 waku/factory/conf_builder/filter_service_conf_builder.nim create mode 100644 waku/factory/conf_builder/metrics_server_conf_builder.nim create mode 100644 waku/factory/conf_builder/rest_server_conf_builder.nim create mode 100644 waku/factory/conf_builder/rln_relay_conf_builder.nim create mode 100644 waku/factory/conf_builder/store_service_conf_builder.nim create mode 100644 waku/factory/conf_builder/store_sync_conf_builder.nim create mode 100644 waku/factory/conf_builder/waku_conf_builder.nim create mode 100644 waku/factory/conf_builder/web_socket_conf_builder.nim create mode 100644 waku/factory/network_conf.nim create mode 100644 waku/factory/waku_conf.nim rename waku/node/{config.nim => net_config.nim} (90%) diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index b28724357..c25ce86d4 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -557,14 +557,19 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = echo "rln-relay preparation is in progress..." let rlnConf = WakuRlnConfig( - rlnRelayDynamic: conf.rlnRelayDynamic, - rlnRelayCredIndex: conf.rlnRelayCredIndex, - rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress, - rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress), - rlnRelayCredPath: conf.rlnRelayCredPath, - rlnRelayCredPassword: conf.rlnRelayCredPassword, - rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit, - rlnEpochSizeSec: conf.rlnEpochSizeSec, + dynamic: conf.rlnRelayDynamic, + credIndex: conf.rlnRelayCredIndex, + chainId: conf.rlnRelayChainId, + ethContractAddress: conf.rlnRelayEthContractAddress, + ethClientAddress: string(conf.rlnRelayethClientAddress), + creds: some( + RlnRelayCreds( + path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword + ) + ), + userMessageLimit: conf.rlnRelayUserMessageLimit, + epochSizeSec: conf.rlnEpochSizeSec, + treePath: conf.rlnRelayTreePath, ) waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler)) diff --git a/apps/chat2/config_chat2.nim b/apps/chat2/config_chat2.nim index 4bdc0d586..830222cd9 100644 --- a/apps/chat2/config_chat2.nim +++ b/apps/chat2/config_chat2.nim @@ -213,6 +213,13 @@ type name: "rln-relay" .}: bool + rlnRelayChainId* {. + desc: + "Chain ID of the provided contract (optional, will fetch from RPC provider if not used)", + defaultValue: 0, + name: "rln-relay-chain-id" + .}: uint + rlnRelayCredPath* {. desc: "The path for peristing rln-relay credential", defaultValue: "", @@ -273,6 +280,12 @@ type name: "rln-relay-epoch-sec" .}: uint64 + rlnRelayTreePath* {. 
+ desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", + defaultValue: "", + name: "rln-relay-tree-path" + .}: string + # NOTE: Keys are different in nim-libp2p proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = try: diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index ef63e6e7d..58f6bd2e3 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -127,7 +127,7 @@ when isMainModule: nodeHealthMonitor = WakuNodeHealthMonitor() nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING) - let restServer = rest_server_builder.startRestServerEsentials( + let restServer = rest_server_builder.startRestServerEssentials( nodeHealthMonitor, wakuConf ).valueOr: error "Starting esential REST server failed.", error = $error diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index d5d7e6bcf..c8b8fe092 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -462,7 +462,7 @@ proc initAndStartApp( nodeBuilder.withNodeKey(key) nodeBuilder.withRecord(record) - nodeBUilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers)) + nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers)) nodeBuilder.withPeerManagerConfig( maxConnections = MaxConnectedPeers, @@ -635,14 +635,13 @@ when isMainModule: if conf.rlnRelay and conf.rlnRelayEthContractAddress != "": let rlnConf = WakuRlnConfig( - rlnRelayDynamic: conf.rlnRelayDynamic, - rlnRelayCredIndex: some(uint(0)), - rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress, - rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress), - rlnRelayCredPath: "", - rlnRelayCredPassword: "", - rlnRelayTreePath: conf.rlnRelayTreePath, - rlnEpochSizeSec: conf.rlnEpochSizeSec, + dynamic: conf.rlnRelayDynamic, + credIndex: some(uint(0)), + ethContractAddress: conf.rlnRelayEthContractAddress, + ethClientAddress: string(conf.rlnRelayethClientAddress), + treePath: conf.rlnRelayTreePath, + epochSizeSec: conf.rlnEpochSizeSec, + creds: none(RlnRelayCreds), onFatalErrorAction: onFatalErrorAction, ) diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index ea5220248..eec889eac 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -195,7 +195,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let netConfig = NetConfig.init( bindIp = bindIp, bindPort = nodeTcpPort, - wsBindPort = wsBindPort, + wsBindPort = some(wsBindPort), wsEnabled = isWs, wssEnabled = isWss, ) diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim index b6e94c747..a99cfcb52 100644 --- a/apps/wakunode2/wakunode2.nim +++ b/apps/wakunode2/wakunode2.nim @@ -38,17 +38,19 @@ when isMainModule: const versionString = "version / git commit hash: " & waku.git_version - var conf = WakuNodeConf.load(version = versionString).valueOr: + var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr: error "failure while loading the configuration", error = error quit(QuitFailure) - ## Also called within Waku.new. The call to startRestServerEsentials needs the following line - logging.setupLog(conf.logLevel, conf.logFormat) + ## Also called within Waku.new. 
The call to startRestServerEssentials needs the following line + logging.setupLog(wakuNodeConf.logLevel, wakuNodeConf.logFormat) - case conf.cmd + case wakuNodeConf.cmd of generateRlnKeystore: + let conf = wakuNodeConf.toKeystoreGeneratorConf() doRlnKeystoreGenerator(conf) of inspectRlnDb: + let conf = wakuNodeConf.toInspectRlnDbConf() doInspectRlnDb(conf) of noCommand: # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it @@ -58,15 +60,20 @@ when isMainModule: nodeHealthMonitor = WakuNodeHealthMonitor() nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING) - var confCopy = conf - - let restServer = rest_server_builder.startRestServerEsentials( - nodeHealthMonitor, confCopy - ).valueOr: - error "Starting esential REST server failed.", error = $error + let conf = wakuNodeConf.toWakuConf().valueOr: + error "Waku configuration failed", error = error quit(QuitFailure) - var waku = Waku.new(confCopy).valueOr: + var restServer: WakuRestServerRef = nil + + if conf.restServerConf.isSome(): + restServer = rest_server_builder.startRestServerEssentials( + nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift + ).valueOr: + error "Starting essential REST server failed.", error = $error + quit(QuitFailure) + + var waku = Waku.new(conf).valueOr: error "Waku initialization failed", error = error quit(QuitFailure) @@ -78,15 +85,27 @@ when isMainModule: error "Starting waku failed", error = error quit(QuitFailure) - rest_server_builder.startRestServerProtocolSupport( - restServer, waku.node, waku.wakuDiscv5, confCopy - ).isOkOr: - error "Starting protocols support REST server failed.", error = $error - quit(QuitFailure) + if conf.restServerConf.isSome(): + rest_server_builder.startRestServerProtocolSupport( + restServer, + waku.node, + waku.wakuDiscv5, + conf.restServerConf.get(), + conf.relay, + conf.lightPush, + conf.clusterId, + conf.shards, + conf.contentTopics, + ).isOkOr: + error "Starting protocols support REST server failed.", error = $error + quit(QuitFailure) - waku.metricsServer = waku_metrics.startMetricsServerAndLogging(confCopy).valueOr: - error "Starting monitoring and external interfaces failed", error = error - quit(QuitFailure) + if conf.metricsServerConf.isSome(): + waku.metricsServer = waku_metrics.startMetricsServerAndLogging( + conf.metricsServerConf.get(), conf.portsShift + ).valueOr: + error "Starting monitoring and external interfaces failed", error = error + quit(QuitFailure) nodeHealthMonitor.setOverallHealth(HealthStatus.READY) diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim index dbab8a3b2..b5dafb0be 100644 --- a/examples/wakustealthcommitments/node_spec.nim +++ b/examples/wakustealthcommitments/node_spec.nim @@ -36,7 +36,6 @@ proc setup*(): Waku = conf.clusterId = twnClusterConf.clusterId conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic - conf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold conf.discv5Discovery = twnClusterConf.discv5Discovery conf.discv5BootstrapNodes = conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim index 9bd0017ab..8a874b681 100644 --- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim +++ 
b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim @@ -72,7 +72,11 @@ proc createWaku( appCallbacks.relayHandler = nil appCallbacks.topicHealthChangeHandler = nil - let wakuRes = Waku.new(conf, appCallbacks).valueOr: + # TODO: Convert `confJson` directly to `WakuConf` + let wakuConf = conf.toWakuConf().valueOr: + return err("Configuration error: " & $error) + + let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr: error "waku initialization failed", error = error return err("Failed setting up Waku: " & $error) diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 20da29fe2..07e0cd895 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -108,4 +108,4 @@ import import ./waku_rln_relay/test_all # Node Factory -import ./factory/test_config +import ./factory/test_external_config diff --git a/tests/factory/test_config.nim b/tests/factory/test_config.nim deleted file mode 100644 index 1d8bf6e37..000000000 --- a/tests/factory/test_config.nim +++ /dev/null @@ -1,157 +0,0 @@ -{.used.} - -import - std/options, - testutils/unittests, - chronos, - libp2p/crypto/[crypto, secp], - libp2p/multiaddress, - nimcrypto/utils, - secp256k1, - confutils -import - ../../waku/factory/external_config, - ../../waku/factory/internal_config, - ../../waku/factory/networks_config, - ../../waku/common/logging - -suite "Waku config - apply preset": - test "Default preset is TWN": - ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() - - ## Given - let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn") - - ## When - let res = applyPresetConfiguration(preConfig) - assert res.isOk(), $res.error - - ## Then - let conf = res.get() - assert conf.maxMessageSize == expectedConf.maxMessageSize - assert conf.clusterId == expectedConf.clusterId - assert conf.rlnRelay == expectedConf.rlnRelay - assert conf.rlnRelayEthContractAddress == expectedConf.rlnRelayEthContractAddress - assert conf.rlnRelayDynamic == expectedConf.rlnRelayDynamic - assert conf.rlnRelayChainId == expectedConf.rlnRelayChainId - assert conf.rlnRelayBandwidthThreshold == expectedConf.rlnRelayBandwidthThreshold - assert conf.rlnEpochSizeSec == expectedConf.rlnEpochSizeSec - assert conf.rlnRelayUserMessageLimit == expectedConf.rlnRelayUserMessageLimit - assert conf.numShardsInNetwork == expectedConf.numShardsInNetwork - assert conf.discv5BootstrapNodes == expectedConf.discv5BootstrapNodes - - test "Subscribes to all valid shards in twn": - ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() - - ## Given - let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7] - let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) - - ## When - let res = applyPresetConfiguration(preConfig) - assert res.isOk(), $res.error - - ## Then - let conf = res.get() - assert conf.shards.len == expectedConf.numShardsInNetwork.int - - test "Subscribes to some valid shards in twn": - ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() - - ## Given - let shards: seq[uint16] = @[0, 4, 7] - let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) - - ## When - let resConf = applyPresetConfiguration(preConfig) - let res = validateShards(resConf.get()) - assert res.isOk(), $res.error - - ## Then - let conf = resConf.get() - assert conf.shards.len() == shards.len() - for index, shard in shards: - assert shard in conf.shards - - test "Subscribes to invalid shards in twn": - ## Setup - - ## Given - let shards: seq[uint16] = @[0, 4, 7, 10] - let preConfig = 
WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) - let postConfig = applyPresetConfiguration(preConfig) - - ## When - let res = validateShards(postConfig.get()) - - ## Then - assert res.isErr(), "Invalid shard was accepted" - -suite "Waku config - node key": - test "Passed node key is used": - ## Setup - let nodeKeyStr = - "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff" - let nodekey = block: - let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet() - crypto.PrivateKey(scheme: Secp256k1, skkey: key) - - ## Given - let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr]) - - ## When - let res = getNodeKey(config) - assert res.isOk(), $res.error - - ## Then - let resKey = res.get() - assert utils.toHex(resKey.getRawBytes().get()) == - utils.toHex(nodekey.getRawBytes().get()) - -suite "Waku config - Shards": - test "Shards are valid": - ## Setup - - ## Given - let shards: seq[uint16] = @[0, 2, 4] - let numShardsInNetwork = 5.uint32 - let config = WakuNodeConf( - cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork - ) - - ## When - let res = validateShards(config) - - ## Then - assert res.isOk(), $res.error - - test "Shards are not in range": - ## Setup - - ## Given - let shards: seq[uint16] = @[0, 2, 5] - let numShardsInNetwork = 5.uint32 - let config = WakuNodeConf( - cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork - ) - - ## When - let res = validateShards(config) - - ## Then - assert res.isErr(), "Invalid shard was accepted" - - test "Shard is passed without num shards": - ## Setup - - ## Given - let config = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]) - - ## When - let res = validateShards(config) - - ## Then - assert res.isOk(), $res.error diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim new file mode 100644 index 000000000..1caeb6e7b --- /dev/null +++ b/tests/factory/test_external_config.nim @@ -0,0 +1,208 @@ +{.used.} + +import + std/options, + testutils/unittests, + chronos, + libp2p/crypto/[crypto, secp], + libp2p/multiaddress, + nimcrypto/utils, + secp256k1, + confutils +import + ../../waku/factory/external_config, + ../../waku/factory/networks_config, + ../../waku/factory/waku_conf, + ../../waku/common/logging, + ../../waku/common/utils/parse_size_units + +suite "Waku config - apply preset": + test "Default preset is TWN": + ## Setup + let expectedConf = ClusterConf.TheWakuNetworkConf() + + ## Given + let preConfig = WakuNodeConf( + cmd: noCommand, + preset: "twn", + relay: true, + rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl, + rlnRelayTreePath: "/tmp/sometreepath", + ) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(expectedConf.maxMessageSize)) + check conf.clusterId == expectedConf.clusterId + check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic + check rlnRelayConf.chainId == expectedConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit + check conf.numShardsInNetwork == expectedConf.numShardsInNetwork + check conf.discv5Conf.isSome() 
== expectedConf.discv5Discovery + if conf.discv5Conf.isSome(): + let discv5Conf = conf.discv5Conf.get() + check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes + + test "Subscribes to all valid shards in twn": + ## Setup + let expectedConf = ClusterConf.TheWakuNetworkConf() + + ## Given + let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.shards.len == expectedConf.numShardsInNetwork.int + + test "Subscribes to some valid shards in twn": + ## Setup + let expectedConf = ClusterConf.TheWakuNetworkConf() + + ## Given + let shards: seq[uint16] = @[0, 4, 7] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let resConf = preConfig.toWakuConf() + assert resConf.isOk(), $resConf.error + + ## Then + let conf = resConf.get() + assert conf.shards.len() == shards.len() + for index, shard in shards: + assert shard in conf.shards + + test "Subscribes to invalid shards in twn": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 4, 7, 10] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let res = preConfig.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" + + test "Apply TWN preset when cluster id = 1": + ## Setup + let expectedConf = ClusterConf.TheWakuNetworkConf() + + ## Given + let preConfig = WakuNodeConf( + cmd: noCommand, + clusterId: 1.uint16, + relay: true, + rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl, + rlnRelayTreePath: "/tmp/sometreepath", + ) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(expectedConf.maxMessageSize)) + check conf.clusterId == expectedConf.clusterId + check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic + check rlnRelayConf.chainId == expectedConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit + check conf.numShardsInNetwork == expectedConf.numShardsInNetwork + check conf.discv5Conf.isSome() == expectedConf.discv5Discovery + if conf.discv5Conf.isSome(): + let discv5Conf = conf.discv5Conf.get() + check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes + +suite "Waku config - node key": + test "Passed node key is used": + ## Setup + let nodeKeyStr = + "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff" + let nodekey = block: + let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet() + crypto.PrivateKey(scheme: Secp256k1, skkey: key) + + ## Given + let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr]) + + ## When + let res = config.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let resKey = res.get().nodeKey + assert utils.toHex(resKey.getRawBytes().get()) == + utils.toHex(nodekey.getRawBytes().get()) + +suite "Waku config - Shards": + test "Shards are valid": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 2, 4] + let numShardsInNetwork = 5.uint32 + let wakuNodeConf = 
WakuNodeConf( + cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork + ) + + ## When + let res = wakuNodeConf.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let wakuConf = res.get() + let vRes = wakuConf.validate() + assert vRes.isOk(), $vRes.error + + test "Shards are not in range": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 2, 5] + let numShardsInNetwork = 5.uint32 + let wakuNodeConf = WakuNodeConf( + cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork + ) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" + + test "Shard is passed without num shards": + ## Setup + + ## Given + let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + let wakuConf = res.get() + let vRes = wakuConf.validate() + assert vRes.isOk(), $vRes.error diff --git a/tests/factory/test_node_factory.nim b/tests/factory/test_node_factory.nim index c575c2b81..f3d11b1a2 100644 --- a/tests/factory/test_node_factory.nim +++ b/tests/factory/test_node_factory.nim @@ -2,11 +2,15 @@ import testutils/unittests, chronos, libp2p/protocols/connectivity/relay/relay -import ../testlib/wakunode, waku/factory/node_factory, waku/waku_node +import + ../testlib/wakunode, + waku/factory/node_factory, + waku/waku_node, + waku/factory/conf_builder/conf_builder suite "Node Factory": test "Set up a node based on default configurations": - let conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() let node = setupNode(conf, relay = Relay.new()).valueOr: raiseAssert error @@ -20,8 +24,10 @@ suite "Node Factory": not node.wakuRendezvous.isNil() test "Set up a node with Store enabled": - var conf = defaultTestWakuNodeConf() - conf.store = true + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.storeServiceConf.withEnabled(true) + confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3") + let conf = confBuilder.build().value let node = setupNode(conf, relay = Relay.new()).valueOr: raiseAssert error @@ -32,8 +38,9 @@ suite "Node Factory": not node.wakuArchive.isNil() test "Set up a node with Filter enabled": - var conf = defaultTestWakuNodeConf() - conf.filter = true + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.filterServiceConf.withEnabled(true) + let conf = confBuilder.build().value let node = setupNode(conf, relay = Relay.new()).valueOr: raiseAssert error @@ -43,7 +50,7 @@ test "Set up a node with Filter enabled": not node.wakuFilter.isNil() test "Start a node based on default configurations": - let conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() let node = setupNode(conf, relay = Relay.new()).valueOr: raiseAssert error diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim new file mode 100644 index 000000000..6b7040dd5 --- /dev/null +++ b/tests/factory/test_waku_conf.nim @@ -0,0 +1,272 @@ +{.used.} + +import + libp2p/crypto/[crypto, secp], + libp2p/multiaddress, + nimcrypto/utils, + std/[options, sequtils], + results, + testutils/unittests +import + waku/factory/waku_conf, + waku/factory/waku_conf_builder, + waku/factory/networks_config, + waku/common/utils/parse_size_units + +suite "Waku Conf - build with cluster conf": + test "Cluster Conf is passed and relay is enabled": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.discv5Conf.withUdpPort(9000) + 
builder.withRelayServiceRatio("50:50") + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.withClusterConf(clusterConf) + builder.withRelay(true) + builder.rlnRelayConf.withTreePath("/tmp/test-tree-path") + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == clusterConf.clusterId + check conf.numShardsInNetwork == clusterConf.numShardsInNetwork + check conf.shards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + + if clusterConf.rlnRelay: + assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled" + + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress.string == + clusterConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic + check rlnRelayConf.chainId == clusterConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit + + test "Cluster Conf is passed, but relay is disabled": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.withRelayServiceRatio("50:50") + builder.discv5Conf.withUdpPort(9000) + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.withClusterConf(clusterConf) + builder.withRelay(false) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == clusterConf.clusterId + check conf.numShardsInNetwork == clusterConf.numShardsInNetwork + check conf.shards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + + assert conf.rlnRelayConf.isNone + + test "Cluster Conf is passed, but rln relay is disabled": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + + let # Mount all shards in network + expectedShards = toSeq[0.uint16 .. 
7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.withClusterConf(clusterConf) + builder.rlnRelayConf.withEnabled(false) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == clusterConf.clusterId + check conf.numShardsInNetwork == clusterConf.numShardsInNetwork + check conf.shards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + assert conf.rlnRelayConf.isNone + + test "Cluster Conf is passed and valid shards are specified": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + let shards = @[2.uint16, 3.uint16] + + ## Given + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.withClusterConf(clusterConf) + builder.withShards(shards) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == clusterConf.clusterId + check conf.numShardsInNetwork == clusterConf.numShardsInNetwork + check conf.shards == shards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + + test "Cluster Conf is passed and invalid shards are specified": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + let shards = @[2.uint16, 10.uint16] + + ## Given + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.withClusterConf(clusterConf) + builder.withShards(shards) + + ## When + let resConf = builder.build() + + ## Then + assert resConf.isErr(), "Invalid shard was accepted" + + test "Cluster Conf is passed and RLN contract is overridden": + ## Setup + let clusterConf = ClusterConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 
7.uint16] + let contractAddress = "0x0123456789ABCDEF" + + ## Given + builder.rlnRelayConf.withEthContractAddress(contractAddress) + builder.withClusterConf(clusterConf) + builder.withRelay(true) + builder.rlnRelayConf.withTreePath("/tmp/test") + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == clusterConf.clusterId + check conf.numShardsInNetwork == clusterConf.numShardsInNetwork + check conf.shards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) + check conf.discv5Conf.isSome == clusterConf.discv5Discovery + check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + + if clusterConf.rlnRelay: + assert conf.rlnRelayConf.isSome + + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress.string == contractAddress + check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic + check rlnRelayConf.chainId == clusterConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit + +suite "Waku Conf - node key": + test "Node key is generated": + ## Setup + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + let pubkey = getPublicKey(conf.nodeKey) + assert pubkey.isOk() + + test "Passed node key is used": + ## Setup + let nodeKeyStr = + "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff" + let nodeKey = block: + let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet() + crypto.PrivateKey(scheme: Secp256k1, skkey: key) + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + builder.withNodeKey(nodeKey) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + assert utils.toHex(conf.nodeKey.getRawBytes().get()) == + utils.toHex(nodeKey.getRawBytes().get()), + "Passed node key isn't in config:" & $nodeKey & $conf.nodeKey + +suite "Waku Conf - extMultiaddrs": + test "Valid multiaddresses are passed and accepted": + ## Setup + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + let multiaddrs = + @["/ip4/127.0.0.1/udp/9090/quic", "/ip6/::1/tcp/3217", "/dns4/foo.com/tcp/80"] + builder.withExtMultiAddrs(multiaddrs) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check multiaddrs.len == conf.networkConf.extMultiAddrs.len + let resMultiaddrs = conf.networkConf.extMultiAddrs.map( + proc(m: MultiAddress): string = + $m + ) + for m in multiaddrs: + check m in resMultiaddrs diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index e19d29c64..dfc306de8 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -134,11 +134,11 @@ suite "RLN Proofs as a Lightpush Service": # mount rln-relay let 
wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode"), ) await allFutures(server.start(), client.start()) diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 72e9b8bf3..8d48c8cb7 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -128,11 +128,11 @@ suite "RLN Proofs as a Lightpush Service": # mount rln-relay let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode"), ) await allFutures(server.start(), client.start()) diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim index 45dc6ce37..afc282d50 100644 --- a/tests/node/test_wakunode_relay_rln.nim +++ b/tests/node/test_wakunode_relay_rln.nim @@ -83,16 +83,15 @@ proc getWakuRlnConfigOnChain*( ethClientAddress: Option[string] = none(string), ): WakuRlnConfig = return WakuRlnConfig( - rlnRelayDynamic: true, - rlnRelayCredIndex: some(credIndex), - rlnRelayEthContractAddress: rlnRelayEthContractAddress, - rlnRelayEthClientAddress: ethClientAddress.get(EthClient), - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $credIndex), - rlnEpochSizeSec: 1, + dynamic: true, + credIndex: some(credIndex), + ethContractAddress: rlnRelayEthContractAddress, + ethClientAddress: ethClientAddress.get(EthClient), + treePath: genTempPath("rln_tree", "wakunode_" & $credIndex), + epochSizeSec: 1, onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler), # If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership" - rlnRelayCredPath: keystorePath, - rlnRelayCredPassword: password, + creds: some(RlnRelayCreds(path: keystorePath, password: password)), ) proc setupRelayWithOnChainRln*( @@ -227,13 +226,13 @@ suite "Waku RlnRelay - End to End - Static": let contractAddress = await uploadRLNContract(EthClient) let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: true, - rlnRelayCredIndex: some(0.uint), - rlnRelayUserMessageLimit: 111, - rlnRelayTreepath: genTempPath("rln_tree", "wakunode_0"), - rlnRelayEthClientAddress: EthClient, - rlnRelayEthContractAddress: $contractAddress, - rlnRelayChainId: 1337, + dynamic: true, + credIndex: some(0.uint), + userMessageLimit: 111, + treepath: genTempPath("rln_tree", "wakunode_0"), + ethClientAddress: EthClient, + ethContractAddress: $contractAddress, + chainId: 1337, onFatalErrorAction: proc(errStr: string) = raiseAssert errStr , diff --git a/tests/test_waku_netconfig.nim b/tests/test_waku_netconfig.nim index 4d5a2df7d..d2c9cc780 100644 --- a/tests/test_waku_netconfig.nim +++ b/tests/test_waku_netconfig.nim @@ -4,7 +4,7 @@ import chronos, confutils/toml/std/net, libp2p/multiaddress, testutils/unittests import ./testlib/wakunode, waku/waku_enr/capabilities -include waku/node/config +include waku/node/net_config proc defaultTestWakuFlags(): CapabilitiesBitfield = CapabilitiesBitfield.init( @@ -13,19 +13,27 @@ proc 
defaultTestWakuFlags(): CapabilitiesBitfield = suite "Waku NetConfig": asyncTest "Create NetConfig with default values": - let conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() let wakuFlags = defaultTestWakuFlags() let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extIp = none(IpAddress), extPort = none(Port), extMultiAddrs = @[], - wsBindPort = conf.websocketPort, - wsEnabled = conf.websocketSupport, - wssEnabled = conf.websocketSecureSupport, + wsBindPort = + if conf.webSocketConf.isSome(): + some(conf.webSocketConf.get().port) + else: + none(Port), + wsEnabled = conf.webSocketConf.isSome(), + wssEnabled = + if conf.webSocketConf.isSome(): + conf.webSocketConf.get().secureConf.isSome() + else: + false, dns4DomainName = none(string), discv5UdpPort = none(Port), wakuFlags = some(wakuFlags), @@ -35,10 +43,11 @@ suite "Waku NetConfig": netConfigRes.isOk() asyncTest "AnnouncedAddresses contains only bind address when no external addresses are provided": - let conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() - let netConfigRes = - NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort) + let netConfigRes = NetConfig.init( + bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort + ) assert netConfigRes.isOk(), $netConfigRes.error @@ -47,17 +56,19 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 1 # Only bind address should be present netConfig.announcedAddresses[0] == - formatListenAddress(ip4TcpEndPoint(conf.listenAddress, conf.tcpPort)) + formatListenAddress( + ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort) + ) asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extIp = parseIpAddress("1.2.3.4") extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), ) @@ -72,13 +83,13 @@ suite "Waku NetConfig": asyncTest "AnnouncedAddresses contains dns4DomainName if provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() dns4DomainName = "example.com" extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), ) @@ -93,14 +104,14 @@ suite "Waku NetConfig": asyncTest "AnnouncedAddresses includes extMultiAddrs when provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extIp = parseIpAddress("1.2.3.4") extPort = Port(1234) extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)] let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, ) @@ -114,14 +125,14 @@ suite "Waku NetConfig": asyncTest "AnnouncedAddresses uses dns4DomainName over extIp when both are provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() dns4DomainName = "example.com" extIp = parseIpAddress("1.2.3.4") extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = 
conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extIp = some(extIp), extPort = some(extPort), @@ -137,12 +148,12 @@ suite "Waku NetConfig": asyncTest "AnnouncedAddresses includes WebSocket addresses when enabled": var - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() wssEnabled = false var netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, wsEnabled = true, wssEnabled = wssEnabled, ) @@ -153,16 +164,18 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress - netConfig.announcedAddresses[1] == - (ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled)) + netConfig.announcedAddresses[1] == ( + ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) & + wsFlag(wssEnabled) + ) ## Now try the same for the case of wssEnabled = true wssEnabled = true netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, wsEnabled = true, wssEnabled = wssEnabled, ) @@ -173,19 +186,21 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress - netConfig.announcedAddresses[1] == - (ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled)) + netConfig.announcedAddresses[1] == ( + ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) & + wsFlag(wssEnabled) + ) asyncTest "Announced WebSocket address contains external IP if provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extIp = parseIpAddress("1.2.3.4") extPort = Port(1234) wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), wsEnabled = true, @@ -199,18 +214,18 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # External address + wsHostAddress netConfig.announcedAddresses[1] == - (ip4TcpEndPoint(extIp, conf.websocketPort) & wsFlag(wssEnabled)) + (ip4TcpEndPoint(extIp, conf.websocketConf.get().port) & wsFlag(wssEnabled)) asyncTest "Announced WebSocket address contains dns4DomainName if provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() dns4DomainName = "example.com" extPort = Port(1234) wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), wsEnabled = true, @@ -223,20 +238,22 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress - netConfig.announcedAddresses[1] == - (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled)) + netConfig.announcedAddresses[1] == ( + dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) & + wsFlag(wssEnabled) + ) asyncTest "Announced WebSocket address contains dns4DomainName if provided alongside extIp": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() dns4DomainName = "example.com" extIp = 
parseIpAddress("1.2.3.4") extPort = Port(1234) wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extIp = some(extIp), extPort = some(extPort), @@ -251,32 +268,35 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort) - netConfig.announcedAddresses[1] == - (dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled)) + netConfig.announcedAddresses[1] == ( + dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) & + wsFlag(wssEnabled) + ) asyncTest "ENR is set with bindIp/Port if no extIp/Port are provided": - let conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() - let netConfigRes = - NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort) + let netConfigRes = NetConfig.init( + bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort + ) assert netConfigRes.isOk(), $netConfigRes.error let netConfig = netConfigRes.get() check: - netConfig.enrIp.get() == conf.listenAddress - netConfig.enrPort.get() == conf.tcpPort + netConfig.enrIp.get() == conf.networkConf.p2pListenAddress + netConfig.enrPort.get() == conf.networkConf.p2pTcpPort asyncTest "ENR is set with extIp/Port if provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extIp = parseIpAddress("1.2.3.4") extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), ) @@ -291,13 +311,13 @@ suite "Waku NetConfig": asyncTest "ENR is set with dns4DomainName if provided": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() dns4DomainName = "example.com" extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), ) @@ -311,7 +331,7 @@ suite "Waku NetConfig": asyncTest "wsHostAddress is not announced if a WS/WSS address is provided in extMultiAddrs": var - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extAddIp = parseIpAddress("1.2.3.4") extAddPort = Port(1234) wsEnabled = true @@ -319,8 +339,8 @@ suite "Waku NetConfig": extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] var netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, wsEnabled = wsEnabled, ) @@ -338,8 +358,8 @@ suite "Waku NetConfig": extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, wssEnabled = wssEnabled, ) @@ -354,14 +374,14 @@ suite "Waku NetConfig": asyncTest "Only extMultiAddrs are published when enabling extMultiAddrsOnly flag": let - conf = defaultTestWakuNodeConf() + conf = defaultTestWakuConf() extAddIp = parseIpAddress("1.2.3.4") 
extAddPort = Port(1234) extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)] let netConfigRes = NetConfig.init( - bindIp = conf.listenAddress, - bindPort = conf.tcpPort, + bindIp = conf.networkConf.p2pListenAddress, + bindPort = conf.networkConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, extMultiAddrsOnly = true, ) diff --git a/tests/testlib/wakucore.nim b/tests/testlib/wakucore.nim index c68a69deb..c5e16d03a 100644 --- a/tests/testlib/wakucore.nim +++ b/tests/testlib/wakucore.nim @@ -2,7 +2,6 @@ import std/[options, times], results, stew/byteutils, - stew/shims/net, chronos, libp2p/switch, libp2p/builders, diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim index d1df39b6b..87fdbcf5f 100644 --- a/tests/testlib/wakunode.nim +++ b/tests/testlib/wakunode.nim @@ -15,35 +15,41 @@ import node/peer_manager, waku_enr, discovery/waku_discv5, - factory/external_config, factory/internal_config, + factory/waku_conf, + factory/conf_builder/conf_builder, factory/builder, ], ./common # Waku node -proc defaultTestWakuNodeConf*(): WakuNodeConf = - ## set cluster-id == 0 to not use TWN as that needs a background blockchain (e.g. anvil) - ## running because RLN is mounted if TWN (cluster-id == 1) is configured. - WakuNodeConf( - cmd: noCommand, - tcpPort: Port(60000), - websocketPort: Port(8000), - listenAddress: parseIpAddress("0.0.0.0"), - restAddress: parseIpAddress("127.0.0.1"), - metricsServerAddress: parseIpAddress("127.0.0.1"), - dnsAddrsNameServers: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], - nat: "any", - maxConnections: 50, - relayServiceRatio: "60:40", - maxMessageSize: "1024 KiB", - clusterId: DefaultClusterId, - shards: @[DefaultShardId], - relay: true, - rendezvous: true, - storeMessageDbUrl: "sqlite://store.sqlite3", +# TODO: migrate to usage of a test cluster conf +proc defaultTestWakuConfBuilder*(): WakuConfBuilder = + var builder = WakuConfBuilder.init() + builder.withP2pTcpPort(Port(60000)) + builder.withP2pListenAddress(parseIpAddress("0.0.0.0")) + builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1")) + builder.withDnsAddrsNameServers( + @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] ) + builder.withNatStrategy("any") + builder.withMaxConnections(50) + builder.withRelayServiceRatio("60:40") + builder.withMaxMessageSize("1024 KiB") + builder.withClusterId(DefaultClusterId) + builder.withShards(@[DefaultShardId]) + builder.withRelay(true) + builder.withRendezvous(true) + builder.storeServiceConf.withDbMigration(false) + builder.storeServiceConf.withSupportV2(false) + builder.webSocketConf.withWebSocketPort(Port(8000)) + builder.webSocketConf.withEnabled(true) + return builder + +proc defaultTestWakuConf*(): WakuConf = + var builder = defaultTestWakuConfBuilder() + return builder.build().value proc newTestWakuNode*( nodeKey: crypto.PrivateKey, @@ -78,31 +84,31 @@ proc newTestWakuNode*( else: extPort - var conf = defaultTestWakuNodeConf() + var conf = defaultTestWakuConf() conf.clusterId = clusterId conf.shards = shards if dns4DomainName.isSome() and extIp.isNone(): # If there's an error resolving the IP, an exception is thrown and test fails - let dns = (waitFor dnsResolve(dns4DomainName.get(), conf)).valueOr: + let dns = (waitFor dnsResolve(dns4DomainName.get(), conf.dnsAddrsNameServers)).valueOr: raise newException(Defect, error) resolvedExtIp = some(parseIpAddress(dns)) let netConf = NetConfig.init( - bindIp = bindIp, clusterId = conf.clusterId, + bindIp = bindIp, bindPort = bindPort, extIp = resolvedExtIp, extPort = extPort, 
extMultiAddrs = extMultiAddrs, - wsBindPort = wsBindPort, + wsBindPort = some(wsBindPort), wsEnabled = wsEnabled, wssEnabled = wssEnabled, - wakuFlags = wakuFlags, dns4DomainName = dns4DomainName, discv5UdpPort = discv5UdpPort, + wakuFlags = wakuFlags, ).valueOr: raise newException(Defect, "Invalid network configuration: " & error) diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index b2d59813a..c5dd1c55e 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -1,27 +1,33 @@ {.used.} import - std/[sequtils, algorithm], + std/[sequtils, algorithm, options, net], results, - stew/shims/net, chronos, chronicles, testutils/unittests, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys, + eth/p2p/discoveryv5/enr as ethEnr, libp2p/crypto/secp, libp2p/protocols/rendezvous import - waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/capabilities], + waku/[ + waku_core/topics, + waku_core/codecs, + waku_enr, + discovery/waku_discv5, + waku_enr/capabilities, + factory/conf_builder/conf_builder, + factory/waku, + node/waku_node, + node/peer_manager, + ], ../testlib/[wakucore, testasync, assertions, futures, wakunode, testutils], ../waku_enr/utils, ./utils as discv5_utils -import eth/p2p/discoveryv5/enr as ethEnr - -include waku/factory/waku - suite "Waku Discovery v5": const validEnr = "enr:-K64QGAvsATunmvMT5c3LFjKS0tG39zlQ1195Z2pWu6RoB5fWP3EXz9QPlRXN" & @@ -360,7 +366,7 @@ suite "Waku Discovery v5": # Cleanup await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop()) - suite "addBoostrapNode": + suite "addBootstrapNode": asyncTest "address is valid": # Given an empty list of enrs var enrs: seq[Record] = @[] @@ -413,25 +419,32 @@ suite "Waku Discovery v5": suite "waku discv5 initialization": asyncTest "Start waku and check discv5 discovered peers": - let myRng = crypto.newRng() - var conf = defaultTestWakuNodeConf() + let myRng = libp2p_keys.newRng() + var confBuilder = defaultTestWakuConfBuilder() - conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) - conf.discv5Discovery = true - conf.discv5UdpPort = Port(9000) + confBuilder.withNodeKey(libp2p_keys.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9000.Port) + + let conf = confBuilder.build().valueOr: + raiseAssert error let waku0 = Waku.new(conf).valueOr: raiseAssert error (waitFor startWaku(addr waku0)).isOkOr: raiseAssert error - conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) - conf.discv5BootstrapNodes = @[waku0.node.enr.toURI()] - conf.discv5Discovery = true - conf.discv5UdpPort = Port(9001) - conf.tcpPort = Port(60001) + confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.discv5Conf.withBootstrapNodes(@[waku0.node.enr.toURI()]) + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9001.Port) + confBuilder.withP2pTcpPort(60001.Port) + confBuilder.websocketConf.withEnabled(false) - let waku1 = Waku.new(conf).valueOr: + let conf1 = confBuilder.build().valueOr: + raiseAssert error + + let waku1 = Waku.new(conf1).valueOr: raiseAssert error (waitFor startWaku(addr waku1)).isOkOr: raiseAssert error @@ -439,12 +452,14 @@ suite "Waku Discovery v5": await waku1.node.mountPeerExchange() await waku1.node.mountRendezvous() - var conf2 = conf - conf2.discv5BootstrapNodes = @[waku1.node.enr.toURI()] - conf2.discv5Discovery = true - conf2.tcpPort = Port(60003) - 
conf2.discv5UdpPort = Port(9003) - conf2.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.discv5Conf.withBootstrapNodes(@[waku1.node.enr.toURI()]) + confBuilder.withP2pTcpPort(60003.Port) + confBuilder.discv5Conf.withUdpPort(9003.Port) + confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.websocketConf.withEnabled(false) + + let conf2 = confBuilder.build().valueOr: + raiseAssert error let waku2 = Waku.new(conf2).valueOr: raiseAssert error @@ -470,16 +485,26 @@ suite "Waku Discovery v5": assert r.isSome(), "could not retrieve peer mounting RendezVousCodec" asyncTest "Discv5 bootstrap nodes should be added to the peer store": - var conf = defaultTestWakuNodeConf() - - conf.discv5BootstrapNodes = @[validEnr] + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9003.Port) + confBuilder.discv5Conf.withBootstrapNodes(@[validEnr]) + let conf = confBuilder.build().valueOr: + raiseAssert error let waku = Waku.new(conf).valueOr: raiseAssert error discard setupDiscoveryV5( - waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, - waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key, + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + waku.conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + waku.conf.nodeKey, + waku.conf.networkConf.p2pListenAddress, + waku.conf.portsShift, ) check: @@ -488,18 +513,29 @@ suite "Waku Discovery v5": ) asyncTest "Invalid discv5 bootstrap node ENRs are ignored": - var conf = defaultTestWakuNodeConf() + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9004.Port) let invalidEnr = "invalid-enr" - conf.discv5BootstrapNodes = @[invalidEnr] + confBuilder.discv5Conf.withBootstrapNodes(@[invalidEnr]) + let conf = confBuilder.build().valueOr: + raiseAssert error let waku = Waku.new(conf).valueOr: raiseAssert error discard setupDiscoveryV5( - waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, - waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key, + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + waku.conf.nodeKey, + waku.conf.networkConf.p2pListenAddress, + waku.conf.portsShift, ) check: diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim index 821881f4c..6de28583e 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -44,10 +44,10 @@ proc newTestWakuRelay*(switch = newTestSwitch()): Future[WakuRelay] {.async.} = proc setupRln*(node: WakuNode, identifier: uint) {.async.} = await node.mountRlnRelay( WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(identifier), - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier), - rlnEpochSizeSec: 1, + dynamic: false, + credIndex: some(identifier), + treePath: genTempPath("rln_tree", "wakunode_" & $identifier), + epochSizeSec: 1, ) ) diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index 95ec7b4c7..907b7c1b3 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -690,11 +690,11 @@ suite "Waku rln relay": let index = MembershipIndex(5) let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(index), - rlnRelayUserMessageLimit: 1, - 
rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"), + dynamic: false, + credIndex: some(index), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "waku_rln_relay_2"), ) let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: @@ -741,22 +741,22 @@ suite "Waku rln relay": let index2 = MembershipIndex(6) let rlnConf1 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(index1), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"), + dynamic: false, + credIndex: some(index1), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "waku_rln_relay_3"), ) let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr: raiseAssert "failed to create waku rln relay: " & $error let rlnConf2 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(index2), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"), + dynamic: false, + credIndex: some(index2), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "waku_rln_relay_4"), ) let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr: @@ -893,11 +893,11 @@ suite "Waku rln relay": proc runTestForEpochSizeSec(rlnEpochSizeSec: uint) {.async.} = let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(index), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: rlnEpochSizeSec, - rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"), + dynamic: false, + credIndex: some(index), + userMessageLimit: 1, + epochSizeSec: rlnEpochSizeSec, + treePath: genTempPath("rln_tree", "waku_rln_relay_4"), ) let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index bd8edfcd3..a5237dab1 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -25,11 +25,11 @@ proc buildWakuRlnConfig( let treePath = genTempPath("rln_tree", treeFilename) # Off-chain return WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(credIndex.uint), - rlnRelayUserMessageLimit: userMessageLimit, - rlnEpochSizeSec: epochSizeSec, - rlnRelayTreePath: treePath, + dynamic: false, + credIndex: some(credIndex.uint), + userMessageLimit: userMessageLimit, + epochSizeSec: epochSizeSec, + treePath: treePath, ) proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async.} = @@ -63,11 +63,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode"), ) await node1.mountRlnRelay(wakuRlnConfig1) @@ -79,11 +79,11 @@ procSuite "WakuNode - RLN relay": assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"), + dynamic: false, + credIndex: some(2.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: 
genTempPath("rln_tree", "wakunode_2"), ) await node2.mountRlnRelay(wakuRlnConfig2) @@ -95,11 +95,11 @@ procSuite "WakuNode - RLN relay": assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"), + dynamic: false, + credIndex: some(3.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_3"), ) await node3.mountRlnRelay(wakuRlnConfig3) @@ -174,11 +174,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode for index, node in nodes: let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(index.uint + 1), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)), + dynamic: false, + credIndex: some(index.uint + 1), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)), ) await node.mountRlnRelay(wakuRlnConfig) @@ -278,11 +278,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_4"), ) await node1.mountRlnRelay(wakuRlnConfig1) @@ -294,11 +294,11 @@ procSuite "WakuNode - RLN relay": assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"), + dynamic: false, + credIndex: some(2.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_5"), ) await node2.mountRlnRelay(wakuRlnConfig2) @@ -310,11 +310,11 @@ procSuite "WakuNode - RLN relay": assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"), + dynamic: false, + credIndex: some(3.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_6"), ) await node3.mountRlnRelay(wakuRlnConfig3) @@ -403,11 +403,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode let wakuRlnConfig1 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_7"), ) await node1.mountRlnRelay(wakuRlnConfig1) @@ -420,11 +420,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(2.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"), + dynamic: false, + credIndex: some(2.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_8"), ) await 
node2.mountRlnRelay(wakuRlnConfig2) @@ -436,11 +436,11 @@ procSuite "WakuNode - RLN relay": # mount rlnrelay in off-chain mode let wakuRlnConfig3 = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(3.uint), - rlnRelayUserMessageLimit: 1, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"), + dynamic: false, + credIndex: some(3.uint), + userMessageLimit: 1, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_9"), ) await node3.mountRlnRelay(wakuRlnConfig3) diff --git a/tests/waku_rln_relay/utils_static.nim b/tests/waku_rln_relay/utils_static.nim index 719ce465c..8f564beb1 100644 --- a/tests/waku_rln_relay/utils_static.nim +++ b/tests/waku_rln_relay/utils_static.nim @@ -25,10 +25,10 @@ proc setupStaticRln*( ) {.async.} = await node.mountRlnRelay( WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(identifier), - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier), - rlnEpochSizeSec: 1, + dynamic: false, + credIndex: some(identifier), + treePath: genTempPath("rln_tree", "wakunode_" & $identifier), + epochSizeSec: 1, ) ) diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim index 73ffc8f93..2ee933e3f 100644 --- a/tests/wakunode2/test_app.nim +++ b/tests/wakunode2/test_app.nim @@ -9,15 +9,14 @@ import libp2p/crypto/secp, libp2p/multiaddress, libp2p/switch -import - ../testlib/common, ../testlib/wakucore, ../testlib/wakunode, waku/node/waku_metrics +import ../testlib/wakucore, ../testlib/wakunode -include waku/factory/waku +include waku/factory/waku, waku/common/enr/typed_record suite "Wakunode2 - Waku": test "compilation version should be reported": ## Given - var conf = defaultTestWakuNodeConf() + let conf = defaultTestWakuConf() let waku = Waku.new(conf).valueOr: raiseAssert error @@ -32,7 +31,7 @@ suite "Wakunode2 - Waku": suite "Wakunode2 - Waku initialization": test "peer persistence setup should be successfully mounted": ## Given - var conf = defaultTestWakuNodeConf() + var conf = defaultTestWakuConf() conf.peerPersistence = true let waku = Waku.new(conf).valueOr: @@ -43,7 +42,7 @@ suite "Wakunode2 - Waku initialization": test "node setup is successful with default configuration": ## Given - var conf = defaultTestWakuNodeConf() + var conf = defaultTestWakuConf() ## When var waku = Waku.new(conf).valueOr: @@ -52,9 +51,6 @@ suite "Wakunode2 - Waku initialization": (waitFor startWaku(addr waku)).isOkOr: raiseAssert error - waku.metricsServer = waku_metrics.startMetricsServerAndLogging(conf).valueOr: - raiseAssert error - ## Then let node = waku.node check: @@ -69,8 +65,8 @@ suite "Wakunode2 - Waku initialization": test "app properly handles dynamic port configuration": ## Given - var conf = defaultTestWakuNodeConf() - conf.tcpPort = Port(0) + var conf = defaultTestWakuConf() + conf.networkConf.p2pTcpPort = Port(0) ## When var waku = Waku.new(conf).valueOr: @@ -82,9 +78,12 @@ suite "Wakunode2 - Waku initialization": ## Then let node = waku.node - typedNodeEnr = node.enr.toTypedRecord() + typedNodeEnr = node.enr.toTyped() assert typedNodeEnr.isOk(), $typedNodeEnr.error + let tcpPort = typedNodeEnr.value.tcp() + assert tcpPort.isSome() + check tcpPort.get() != 0 check: # Waku started properly diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index ac2fd9eac..3c7c94e87 100644 --- a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -69,10 +69,10 @@ suite "Waku v2 REST API - health": # now kick in rln 
(currently the only check for health) await node.mountRlnRelay( WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode"), + dynamic: false, + credIndex: some(1.uint), + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode"), ) ) healthMonitor.setNode(node) diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index acfa05bab..719e66b8a 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -226,11 +226,11 @@ suite "Waku v2 Rest API - Relay": (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 20, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 20, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_1"), ) await node.mountRlnRelay(wakuRlnConfig) @@ -456,11 +456,11 @@ suite "Waku v2 Rest API - Relay": (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 20, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 20, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_1"), ) await node.mountRlnRelay(wakuRlnConfig) @@ -510,11 +510,11 @@ suite "Waku v2 Rest API - Relay": (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 20, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 20, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_1"), ) await node.mountRlnRelay(wakuRlnConfig) @@ -561,11 +561,11 @@ suite "Waku v2 Rest API - Relay": (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 20, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 20, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_1"), ) await node.mountRlnRelay(wakuRlnConfig) @@ -618,11 +618,11 @@ suite "Waku v2 Rest API - Relay": (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig = WakuRlnConfig( - rlnRelayDynamic: false, - rlnRelayCredIndex: some(1.uint), - rlnRelayUserMessageLimit: 20, - rlnEpochSizeSec: 1, - rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"), + dynamic: false, + credIndex: some(1.uint), + userMessageLimit: 20, + epochSizeSec: 1, + treePath: genTempPath("rln_tree", "wakunode_1"), ) await node.mountRlnRelay(wakuRlnConfig) diff --git a/tools/rln_db_inspector/rln_db_inspector.nim b/tools/rln_db_inspector/rln_db_inspector.nim index 198f4469d..e1d093e86 100644 --- a/tools/rln_db_inspector/rln_db_inspector.nim +++ b/tools/rln_db_inspector/rln_db_inspector.nim @@ -5,13 +5,15 @@ else: import chronicles, sequtils, results -import - waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils, 
factory/external_config] +import waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils] logScope: topics = "rln_db_inspector" -proc doInspectRlnDb*(conf: WakuNodeConf) = +type InspectRlnDbConf* = object + treePath*: string + +proc doInspectRlnDb*(conf: InspectRlnDbConf) = # 1. load configuration trace "configuration", conf = $conf diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim index 1bde9ae01..0ca1c9968 100644 --- a/tools/rln_keystore_generator/rln_keystore_generator.nim +++ b/tools/rln_keystore_generator/rln_keystore_generator.nim @@ -11,13 +11,22 @@ import waku_rln_relay/rln, waku_rln_relay/conversion_utils, waku_rln_relay/group_manager/on_chain, - factory/external_config, ] logScope: topics = "rln_keystore_generator" -proc doRlnKeystoreGenerator*(conf: WakuNodeConf) = +type RlnKeystoreGeneratorConf* = object + execute*: bool + ethContractAddress*: string + ethClientAddress*: string + chainId*: uint + credPath*: string + credPassword*: string + userMessageLimit*: uint64 + ethPrivateKey*: string + +proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = # 1. load configuration trace "configuration", conf = $conf @@ -56,13 +65,13 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) = # 4. initialize OnchainGroupManager let groupManager = OnchainGroupManager( - ethClientUrl: string(conf.rlnRelayethClientAddress), - chainId: conf.rlnRelayChainId, - ethContractAddress: conf.rlnRelayEthContractAddress, + ethClientUrl: string(conf.ethClientAddress), + chainId: conf.chainId, + ethContractAddress: conf.ethContractAddress, rlnInstance: rlnInstance, keystorePath: none(string), keystorePassword: none(string), - ethPrivateKey: some(conf.rlnRelayEthPrivateKey), + ethPrivateKey: some(conf.ethPrivateKey), onFatalErrorAction: onFatalErrorAction, ) try: @@ -77,7 +86,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) = # 5. register on-chain try: - waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit) + waitFor groupManager.register(credential, conf.userMessageLimit) except Exception, CatchableError: error "failure while registering credentials on-chain", error = getCurrentExceptionMsg() @@ -87,28 +96,27 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) = info "Your membership has been registered on-chain.", chainId = $groupManager.chainId, - contractAddress = conf.rlnRelayEthContractAddress, + contractAddress = conf.ethContractAddress, membershipIndex = groupManager.membershipIndex.get() - info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit + info "Your user message limit is", userMessageLimit = conf.userMessageLimit # 6. 
write to keystore let keystoreCred = KeystoreMembership( membershipContract: MembershipContract( - chainId: $groupManager.chainId, address: conf.rlnRelayEthContractAddress + chainId: $groupManager.chainId, address: conf.ethContractAddress ), treeIndex: groupManager.membershipIndex.get(), identityCredential: credential, - userMessageLimit: conf.rlnRelayUserMessageLimit, + userMessageLimit: conf.userMessageLimit, ) - let persistRes = addMembershipCredentials( - conf.rlnRelayCredPath, keystoreCred, conf.rlnRelayCredPassword, RLNAppInfo - ) + let persistRes = + addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo) if persistRes.isErr(): error "failed to persist credentials", error = persistRes.error quit(1) - info "credentials persisted", path = conf.rlnRelayCredPath + info "credentials persisted", path = conf.credPath try: waitFor groupManager.stop() diff --git a/waku/common/utils/nat.nim b/waku/common/utils/nat.nim index 698ba68be..125a48935 100644 --- a/waku/common/utils/nat.nim +++ b/waku/common/utils/nat.nim @@ -8,13 +8,14 @@ logScope: ## Due to the design of nim-eth/nat module we must ensure it is only initialized once. ## see: https://github.com/waku-org/nwaku/issues/2628 -## Details: nim-eth/nat module starts a meaintenance thread for refreshing the NAT mappings, but everything in the module is global, +## Details: nim-eth/nat module starts a maintenance thread for refreshing the NAT mappings, but everything in the module is global, ## there is no room to store multiple configurations. ## Exact meaning: redirectPorts cannot be called twice in a program lifetime. ## During waku tests we happen to start several node instances in parallel thus resulting in multiple NAT configurations and multiple threads. ## Those threads will dead lock each other in tear down. 
var singletonNat: bool = false +# TODO: pass `NatStrategy`, not a string proc setupNat*( natConf, clientId: string, tcpPort, udpPort: Port ): Result[ diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim index 91649280a..0c57eb384 100644 --- a/waku/discovery/waku_discv5.nim +++ b/waku/discovery/waku_discv5.nim @@ -10,11 +10,7 @@ import eth/keys as eth_keys, eth/p2p/discoveryv5/node, eth/p2p/discoveryv5/protocol -import - ../node/peer_manager/peer_manager, - ../waku_core, - ../waku_enr, - ../factory/external_config +import ../node/peer_manager/peer_manager, ../waku_core, ../waku_enr export protocol, waku_enr @@ -26,6 +22,18 @@ logScope: ## Config +# TODO: merge both conf +type Discv5Conf* {.requiresInit.} = object + # TODO: This should probably be an option on the builder + # But translated to everything else "false" on the config + discv5Only*: bool + bootstrapNodes*: seq[string] + udpPort*: Port + tableIpLimit*: uint + bucketIpLimit*: uint + bitsPerHop*: int + enrAutoUpdate*: bool + type WakuDiscoveryV5Config* = object discv5Config*: Option[DiscoveryConfig] address*: IpAddress @@ -383,10 +391,12 @@ proc setupDiscoveryV5*( myENR: enr.Record, nodePeerManager: PeerManager, nodeTopicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent], - conf: WakuNodeConf, + conf: Discv5Conf, dynamicBootstrapNodes: seq[RemotePeerInfo], rng: ref HmacDrbgContext, key: crypto.PrivateKey, + p2pListenAddress: IpAddress, + portsShift: uint16, ): WakuDiscoveryV5 = let dynamicBootstrapEnrs = dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get()) @@ -394,7 +404,7 @@ proc setupDiscoveryV5*( var discv5BootstrapEnrs: seq[enr.Record] # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq - for enrUri in conf.discv5BootstrapNodes: + for enrUri in conf.bootstrapNodes: addBootstrapNode(enrUri, discv5BootstrapEnrs) for enr in discv5BootstrapEnrs: @@ -407,19 +417,18 @@ proc setupDiscoveryV5*( discv5BootstrapEnrs.add(dynamicBootstrapEnrs) - let discv5Config = DiscoveryConfig.init( - conf.discv5TableIpLimit, conf.discv5BucketIpLimit, conf.discv5BitsPerHop - ) + let discv5Config = + DiscoveryConfig.init(conf.tableIpLimit, conf.bucketIpLimit, conf.bitsPerHop) - let discv5UdpPort = Port(uint16(conf.discv5UdpPort) + conf.portsShift) + let discv5UdpPort = Port(uint16(conf.udpPort) + portsShift) let discv5Conf = WakuDiscoveryV5Config( discv5Config: some(discv5Config), - address: conf.listenAddress, + address: p2pListenAddress, port: discv5UdpPort, privateKey: eth_keys.PrivateKey(key.skkey), bootstrapRecords: discv5BootstrapEnrs, - autoupdateRecord: conf.discv5EnrAutoUpdate, + autoupdateRecord: conf.enrAutoUpdate, ) WakuDiscoveryV5.new( diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim index 78b07ed9b..d1cede969 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -89,7 +89,7 @@ proc withNetworkConfigurationDetails*( extIp = extIp, extPort = extPort, extMultiAddrs = extMultiAddrs, - wsBindPort = wsBindPort, + wsBindPort = some(wsBindPort), wsEnabled = wsEnabled, wssEnabled = wssEnabled, wakuFlags = wakuFlags, diff --git a/waku/factory/conf_builder/conf_builder.nim b/waku/factory/conf_builder/conf_builder.nim new file mode 100644 index 000000000..9b7f44ada --- /dev/null +++ b/waku/factory/conf_builder/conf_builder.nim @@ -0,0 +1,17 @@ +import + ./waku_conf_builder, + ./filter_service_conf_builder, + ./store_sync_conf_builder, + ./store_service_conf_builder, + ./rest_server_conf_builder, + ./dns_discovery_conf_builder, + 
./discv5_conf_builder, + ./web_socket_conf_builder, + ./metrics_server_conf_builder, + ./rln_relay_conf_builder + +export + waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder, + store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder, + discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder, + rln_relay_conf_builder diff --git a/waku/factory/conf_builder/discv5_conf_builder.nim b/waku/factory/conf_builder/discv5_conf_builder.nim new file mode 100644 index 000000000..950b2a4f6 --- /dev/null +++ b/waku/factory/conf_builder/discv5_conf_builder.nim @@ -0,0 +1,65 @@ +import chronicles, std/[net, options, sequtils], results +import ../waku_conf + +logScope: + topics = "waku conf builder discv5" + +########################### +## Discv5 Config Builder ## +########################### +type Discv5ConfBuilder* = object + enabled*: Option[bool] + + bootstrapNodes*: seq[string] + bitsPerHop*: Option[int] + bucketIpLimit*: Option[uint] + discv5Only*: Option[bool] + enrAutoUpdate*: Option[bool] + tableIpLimit*: Option[uint] + udpPort*: Option[Port] + +proc init*(T: type Discv5ConfBuilder): Discv5ConfBuilder = + Discv5ConfBuilder() + +proc withEnabled*(b: var Discv5ConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) = + b.bitsPerHop = some(bitsPerHop) + +proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) = + b.bucketIpLimit = some(bucketIpLimit) + +proc withDiscv5Only*(b: var Discv5ConfBuilder, discv5Only: bool) = + b.discv5Only = some(discv5Only) + +proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) = + b.enrAutoUpdate = some(enrAutoUpdate) + +proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) = + b.tableIpLimit = some(tableIpLimit) + +proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) = + b.udpPort = some(udpPort) + +proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) = + # TODO: validate ENRs? 
+ b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes) + +proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] = + if not b.enabled.get(false): + return ok(none(Discv5Conf)) + + return ok( + some( + Discv5Conf( + bootstrapNodes: b.bootstrapNodes, + bitsPerHop: b.bitsPerHop.get(1), + bucketIpLimit: b.bucketIpLimit.get(2), + discv5Only: b.discv5Only.get(false), + enrAutoUpdate: b.enrAutoUpdate.get(true), + tableIpLimit: b.tableIpLimit.get(10), + udpPort: b.udpPort.get(9000.Port), + ) + ) + ) diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim new file mode 100644 index 000000000..8ac33a18f --- /dev/null +++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim @@ -0,0 +1,38 @@ +import chronicles, std/[net, options, sequtils], results +import ../waku_conf + +logScope: + topics = "waku conf builder dns discovery" + +################################## +## DNS Discovery Config Builder ## +################################## +type DnsDiscoveryConfBuilder* = object + enabled*: Option[bool] + enrTreeUrl*: Option[string] + nameServers*: seq[IpAddress] + +proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder = + DnsDiscoveryConfBuilder() + +proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) = + b.enrTreeUrl = some(enrTreeUrl) + +proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) = + b.nameServers = concat(b.nameServers, nameServers) + +proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] = + if not b.enabled.get(false): + return ok(none(DnsDiscoveryConf)) + + if b.nameServers.len == 0: + return err("dnsDiscovery.nameServers is not specified") + if b.enrTreeUrl.isNone(): + return err("dnsDiscovery.enrTreeUrl is not specified") + + return ok( + some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get())) + ) diff --git a/waku/factory/conf_builder/filter_service_conf_builder.nim b/waku/factory/conf_builder/filter_service_conf_builder.nim new file mode 100644 index 000000000..a3f056b01 --- /dev/null +++ b/waku/factory/conf_builder/filter_service_conf_builder.nim @@ -0,0 +1,45 @@ +import chronicles, std/options, results +import ../waku_conf + +logScope: + topics = "waku conf builder filter service" + +################################### +## Filter Service Config Builder ## +################################### +type FilterServiceConfBuilder* = object + enabled*: Option[bool] + maxPeersToServe*: Option[uint32] + subscriptionTimeout*: Option[uint16] + maxCriteria*: Option[uint32] + +proc init*(T: type FilterServiceConfBuilder): FilterServiceConfBuilder = + FilterServiceConfBuilder() + +proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) = + b.maxPeersToServe = some(maxPeersToServe) + +proc withSubscriptionTimeout*( + b: var FilterServiceConfBuilder, subscriptionTimeout: uint16 +) = + b.subscriptionTimeout = some(subscriptionTimeout) + +proc withMaxCriteria*(b: var FilterServiceConfBuilder, maxCriteria: uint32) = + b.maxCriteria = some(maxCriteria) + +proc build*(b: FilterServiceConfBuilder): Result[Option[FilterServiceConf], string] = + if not b.enabled.get(false): + return ok(none(FilterServiceConf)) + + return ok( + some( + FilterServiceConf( + maxPeersToServe: 
b.maxPeersToServe.get(500), + subscriptionTimeout: b.subscriptionTimeout.get(300), + maxCriteria: b.maxCriteria.get(1000), + ) + ) + ) diff --git a/waku/factory/conf_builder/metrics_server_conf_builder.nim b/waku/factory/conf_builder/metrics_server_conf_builder.nim new file mode 100644 index 000000000..0f0d18564 --- /dev/null +++ b/waku/factory/conf_builder/metrics_server_conf_builder.nim @@ -0,0 +1,47 @@ +import chronicles, std/[net, options], results +import ../waku_conf + +logScope: + topics = "waku conf builder metrics server" + +################################### +## Metrics Server Config Builder ## +################################### +type MetricsServerConfBuilder* = object + enabled*: Option[bool] + + httpAddress*: Option[IpAddress] + httpPort*: Option[Port] + logging*: Option[bool] + +proc init*(T: type MetricsServerConfBuilder): MetricsServerConfBuilder = + MetricsServerConfBuilder() + +proc withEnabled*(b: var MetricsServerConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withHttpAddress*(b: var MetricsServerConfBuilder, httpAddress: IpAddress) = + b.httpAddress = some(httpAddress) + +proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: Port) = + b.httpPort = some(httpPort) + +proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: uint16) = + b.httpPort = some(Port(httpPort)) + +proc withLogging*(b: var MetricsServerConfBuilder, logging: bool) = + b.logging = some(logging) + +proc build*(b: MetricsServerConfBuilder): Result[Option[MetricsServerConf], string] = + if not b.enabled.get(false): + return ok(none(MetricsServerConf)) + + return ok( + some( + MetricsServerConf( + httpAddress: b.httpAddress.get(static parseIpAddress("127.0.0.1")), + httpPort: b.httpPort.get(8008.Port), + logging: b.logging.get(false), + ) + ) + ) diff --git a/waku/factory/conf_builder/rest_server_conf_builder.nim b/waku/factory/conf_builder/rest_server_conf_builder.nim new file mode 100644 index 000000000..2efd91f02 --- /dev/null +++ b/waku/factory/conf_builder/rest_server_conf_builder.nim @@ -0,0 +1,64 @@ +import chronicles, std/[net, options, sequtils], results +import ../waku_conf + +logScope: + topics = "waku conf builder rest server" + +################################ +## REST Server Config Builder ## +################################ +type RestServerConfBuilder* = object + enabled*: Option[bool] + + allowOrigin*: seq[string] + listenAddress*: Option[IpAddress] + port*: Option[Port] + admin*: Option[bool] + relayCacheCapacity*: Option[uint32] + +proc init*(T: type RestServerConfBuilder): RestServerConfBuilder = + RestServerConfBuilder() + +proc withEnabled*(b: var RestServerConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withAllowOrigin*(b: var RestServerConfBuilder, allowOrigin: seq[string]) = + b.allowOrigin = concat(b.allowOrigin, allowOrigin) + +proc withListenAddress*(b: var RestServerConfBuilder, listenAddress: IpAddress) = + b.listenAddress = some(listenAddress) + +proc withPort*(b: var RestServerConfBuilder, port: Port) = + b.port = some(port) + +proc withPort*(b: var RestServerConfBuilder, port: uint16) = + b.port = some(Port(port)) + +proc withAdmin*(b: var RestServerConfBuilder, admin: bool) = + b.admin = some(admin) + +proc withRelayCacheCapacity*(b: var RestServerConfBuilder, relayCacheCapacity: uint32) = + b.relayCacheCapacity = some(relayCacheCapacity) + +proc build*(b: RestServerConfBuilder): Result[Option[RestServerConf], string] = + if not b.enabled.get(false): + return ok(none(RestServerConf)) + + if b.listenAddress.isNone(): + 
return err("restServer.listenAddress is not specified") + if b.port.isNone(): + return err("restServer.port is not specified") + if b.relayCacheCapacity.isNone(): + return err("restServer.relayCacheCapacity is not specified") + + return ok( + some( + RestServerConf( + allowOrigin: b.allowOrigin, + listenAddress: b.listenAddress.get(), + port: b.port.get(), + admin: b.admin.get(false), + relayCacheCapacity: b.relayCacheCapacity.get(), + ) + ) + ) diff --git a/waku/factory/conf_builder/rln_relay_conf_builder.nim b/waku/factory/conf_builder/rln_relay_conf_builder.nim new file mode 100644 index 000000000..ff126d058 --- /dev/null +++ b/waku/factory/conf_builder/rln_relay_conf_builder.nim @@ -0,0 +1,104 @@ +import chronicles, std/options, results +import ../waku_conf + +logScope: + topics = "waku conf builder rln relay" + +############################## +## RLN Relay Config Builder ## +############################## +type RlnRelayConfBuilder* = object + enabled*: Option[bool] + + chainId*: Option[uint] + ethClientAddress*: Option[string] + ethContractAddress*: Option[string] + credIndex*: Option[uint] + credPassword*: Option[string] + credPath*: Option[string] + dynamic*: Option[bool] + epochSizeSec*: Option[uint64] + userMessageLimit*: Option[uint64] + treePath*: Option[string] + +proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder = + RlnRelayConfBuilder() + +proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) = + b.chainId = some(chainId) + +proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) = + b.credIndex = some(credIndex) + +proc withCredPassword*(b: var RlnRelayConfBuilder, credPassword: string) = + b.credPassword = some(credPassword) + +proc withCredPath*(b: var RlnRelayConfBuilder, credPath: string) = + b.credPath = some(credPath) + +proc withDynamic*(b: var RlnRelayConfBuilder, dynamic: bool) = + b.dynamic = some(dynamic) + +proc withEthClientAddress*(b: var RlnRelayConfBuilder, ethClientAddress: string) = + b.ethClientAddress = some(ethClientAddress) + +proc withEthContractAddress*(b: var RlnRelayConfBuilder, ethContractAddress: string) = + b.ethContractAddress = some(ethContractAddress) + +proc withEpochSizeSec*(b: var RlnRelayConfBuilder, epochSizeSec: uint64) = + b.epochSizeSec = some(epochSizeSec) + +proc withUserMessageLimit*(b: var RlnRelayConfBuilder, userMessageLimit: uint64) = + b.userMessageLimit = some(userMessageLimit) + +proc withTreePath*(b: var RlnRelayConfBuilder, treePath: string) = + b.treePath = some(treePath) + +proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] = + if not b.enabled.get(false): + return ok(none(RlnRelayConf)) + + if b.chainId.isNone(): + return err("RLN Relay Chain Id is not specified") + + let creds = + if b.credPath.isSome() and b.credPassword.isSome(): + some(RlnRelayCreds(path: b.credPath.get(), password: b.credPassword.get())) + elif b.credPath.isSome() and b.credPassword.isNone(): + return err("RLN Relay Credential Password is not specified but path is") + elif b.credPath.isNone() and b.credPassword.isSome(): + return err("RLN Relay Credential Path is not specified but password is") + else: + none(RlnRelayCreds) + + if b.dynamic.isNone(): + return err("rlnRelay.dynamic is not specified") + if b.ethClientAddress.get("") == "": + return err("rlnRelay.ethClientAddress is not specified") + if b.ethContractAddress.get("") == "": + return err("rlnRelay.ethContractAddress is not specified") + if 
b.epochSizeSec.isNone(): + return err("rlnRelay.epochSizeSec is not specified") + if b.userMessageLimit.isNone(): + return err("rlnRelay.userMessageLimit is not specified") + if b.treePath.isNone(): + return err("rlnRelay.treePath is not specified") + + return ok( + some( + RlnRelayConf( + chainId: b.chainId.get(), + credIndex: b.credIndex, + creds: creds, + dynamic: b.dynamic.get(), + ethClientAddress: b.ethClientAddress.get(), + ethContractAddress: b.ethContractAddress.get(), + epochSizeSec: b.epochSizeSec.get(), + userMessageLimit: b.userMessageLimit.get(), + treePath: b.treePath.get(), + ) + ) + ) diff --git a/waku/factory/conf_builder/store_service_conf_builder.nim b/waku/factory/conf_builder/store_service_conf_builder.nim new file mode 100644 index 000000000..d12bc8150 --- /dev/null +++ b/waku/factory/conf_builder/store_service_conf_builder.nim @@ -0,0 +1,74 @@ +import chronicles, std/options, results, chronos +import ../waku_conf, ./store_sync_conf_builder + +logScope: + topics = "waku conf builder store service" + +################################## +## Store Service Config Builder ## +################################## +type StoreServiceConfBuilder* = object + enabled*: Option[bool] + + dbMigration*: Option[bool] + dbURl*: Option[string] + dbVacuum*: Option[bool] + supportV2*: Option[bool] + maxNumDbConnections*: Option[int] + retentionPolicy*: Option[string] + resume*: Option[bool] + storeSyncConf*: StoreSyncConfBuilder + +proc init*(T: type StoreServiceConfBuilder): StoreServiceConfBuilder = + StoreServiceConfBuilder(storeSyncConf: StoreSyncConfBuilder.init()) + +proc withEnabled*(b: var StoreServiceConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withDbMigration*(b: var StoreServiceConfBuilder, dbMigration: bool) = + b.dbMigration = some(dbMigration) + +proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) = + b.dbURl = some(dbUrl) + +proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) = + b.dbVacuum = some(dbVacuum) + +proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) = + b.supportV2 = some(supportV2) + +proc withMaxNumDbConnections*( + b: var StoreServiceConfBuilder, maxNumDbConnections: int +) = + b.maxNumDbConnections = some(maxNumDbConnections) + +proc withRetentionPolicy*(b: var StoreServiceConfBuilder, retentionPolicy: string) = + b.retentionPolicy = some(retentionPolicy) + +proc withResume*(b: var StoreServiceConfBuilder, resume: bool) = + b.resume = some(resume) + +proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string] = + if not b.enabled.get(false): + return ok(none(StoreServiceConf)) + + if b.dbUrl.get("") == "": + return err "store.dbUrl is not specified" + + let storeSyncConf = b.storeSyncConf.build().valueOr: + return err("Store Sync Conf failed to build") + + return ok( + some( + StoreServiceConf( + dbMigration: b.dbMigration.get(true), + dbURl: b.dbUrl.get(), + dbVacuum: b.dbVacuum.get(false), + supportV2: b.supportV2.get(true), + maxNumDbConnections: b.maxNumDbConnections.get(50), + retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds), + resume: b.resume.get(false), + storeSyncConf: storeSyncConf, + ) + ) + ) diff --git a/waku/factory/conf_builder/store_sync_conf_builder.nim b/waku/factory/conf_builder/store_sync_conf_builder.nim new file mode 100644 index 000000000..4c7177b71 --- /dev/null +++ b/waku/factory/conf_builder/store_sync_conf_builder.nim @@ -0,0 +1,51 @@ +import chronicles, std/options, results +import ../waku_conf + +logScope: + topics = "waku conf 
builder store sync" + +################################## +## Store Sync Config Builder ## +################################## +type StoreSyncConfBuilder* = object + enabled*: Option[bool] + + rangeSec*: Option[uint32] + intervalSec*: Option[uint32] + relayJitterSec*: Option[uint32] + +proc init*(T: type StoreSyncConfBuilder): StoreSyncConfBuilder = + StoreSyncConfBuilder() + +proc withEnabled*(b: var StoreSyncConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withRangeSec*(b: var StoreSyncConfBuilder, rangeSec: uint32) = + b.rangeSec = some(rangeSec) + +proc withIntervalSec*(b: var StoreSyncConfBuilder, intervalSec: uint32) = + b.intervalSec = some(intervalSec) + +proc withRelayJitterSec*(b: var StoreSyncConfBuilder, relayJitterSec: uint32) = + b.relayJitterSec = some(relayJitterSec) + +proc build*(b: StoreSyncConfBuilder): Result[Option[StoreSyncConf], string] = + if not b.enabled.get(false): + return ok(none(StoreSyncConf)) + + if b.rangeSec.isNone(): + return err "store.rangeSec is not specified" + if b.intervalSec.isNone(): + return err "store.intervalSec is not specified" + if b.relayJitterSec.isNone(): + return err "store.relayJitterSec is not specified" + + return ok( + some( + StoreSyncConf( + rangeSec: b.rangeSec.get(), + intervalSec: b.intervalSec.get(), + relayJitterSec: b.relayJitterSec.get(), + ) + ) + ) diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim new file mode 100644 index 000000000..44cb706af --- /dev/null +++ b/waku/factory/conf_builder/waku_conf_builder.nim @@ -0,0 +1,649 @@ +import + libp2p/crypto/crypto, + libp2p/multiaddress, + std/[net, options, sequtils, strutils], + chronicles, + chronos, + results + +import + ../waku_conf, + ../networks_config, + ../../common/logging, + ../../common/utils/parse_size_units, + ../../waku_enr/capabilities + +import + ./filter_service_conf_builder, + ./store_sync_conf_builder, + ./store_service_conf_builder, + ./rest_server_conf_builder, + ./dns_discovery_conf_builder, + ./discv5_conf_builder, + ./web_socket_conf_builder, + ./metrics_server_conf_builder, + ./rln_relay_conf_builder + +logScope: + topics = "waku conf builder" + +type MaxMessageSizeKind* = enum + mmskNone + mmskStr + mmskInt + +type MaxMessageSize* = object + case kind*: MaxMessageSizeKind + of mmskNone: + discard + of mmskStr: + str*: string + of mmskInt: + bytes*: uint64 + +## `WakuConfBuilder` is a convenient tool to accumulate +## Config parameters to build a `WakuConfig`. +## It provides some type conversion, as well as applying +## defaults in an agnostic manner (for any usage of Waku node) +# +# TODO: Sub protocol builder (eg `StoreServiceConfBuilder` +# is be better defined in the protocol module (eg store) +# and apply good defaults from this protocol PoV and make the +# decision when the dev must specify a value vs when a default +# is fine to have. 
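#
# Editorial aside: a minimal usage sketch of this builder, not part of the
# patch itself. It assumes this module and std/net (for parseIpAddress) are
# imported by the calling code; the cluster id, shard and REST values are
# illustrative assumptions only, chosen to exercise the API defined below.
#
var builder = WakuConfBuilder.init()
builder.withClusterId(2)
builder.withRelay(true)
builder.withNumShardsInNetwork(8)
builder.withShards(@[0'u16, 1'u16])
builder.withMaxMessageSize("150KiB")
# Sub-protocol builders are exposed as fields, e.g. the REST server;
# when enabled, its build() requires listen address, port and cache capacity.
builder.restServerConf.withEnabled(true)
builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1"))
builder.restServerConf.withPort(8645'u16)
builder.restServerConf.withRelayCacheCapacity(30)
let wakuConf = builder.build().valueOr:
  quit("failed to build WakuConf: " & error)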
+# +# TODO: Add default to most values so that when a developer uses +# the builder, it works out-of-the-box +type WakuConfBuilder* = object + nodeKey: Option[crypto.PrivateKey] + + clusterId: Option[uint16] + numShardsInNetwork: Option[uint32] + shards: Option[seq[uint16]] + protectedShards: Option[seq[ProtectedShard]] + contentTopics: Option[seq[string]] + + # Conf builders + dnsDiscoveryConf*: DnsDiscoveryConfBuilder + discv5Conf*: Discv5ConfBuilder + filterServiceConf*: FilterServiceConfBuilder + metricsServerConf*: MetricsServerConfBuilder + restServerConf*: RestServerConfBuilder + rlnRelayConf*: RlnRelayConfBuilder + storeServiceConf*: StoreServiceConfBuilder + webSocketConf*: WebSocketConfBuilder + # End conf builders + relay: Option[bool] + lightPush: Option[bool] + peerExchange: Option[bool] + storeSync: Option[bool] + relayPeerExchange: Option[bool] + + # TODO: move within a relayConf + rendezvous: Option[bool] + discv5Only: Option[bool] + + clusterConf: Option[ClusterConf] + + staticNodes: seq[string] + + remoteStoreNode: Option[string] + remoteLightPushNode: Option[string] + remoteFilterNode: Option[string] + remotePeerExchangeNode: Option[string] + + maxMessageSize: MaxMessageSize + + logLevel: Option[logging.LogLevel] + logFormat: Option[logging.LogFormat] + + natStrategy: Option[string] + + p2pTcpPort: Option[Port] + p2pListenAddress: Option[IpAddress] + portsShift: Option[uint16] + dns4DomainName: Option[string] + extMultiAddrs: seq[string] + extMultiAddrsOnly: Option[bool] + + dnsAddrs: Option[bool] + dnsAddrsNameServers: seq[IpAddress] + + peerPersistence: Option[bool] + peerStoreCapacity: Option[int] + maxConnections: Option[int] + colocationLimit: Option[int] + + agentString: Option[string] + + rateLimits: Option[seq[string]] + + maxRelayPeers: Option[int] + relayShardedPeerManagement: Option[bool] + relayServiceRatio: Option[string] + circuitRelayClient: Option[bool] + keepAlive: Option[bool] + p2pReliability: Option[bool] + +proc init*(T: type WakuConfBuilder): WakuConfBuilder = + WakuConfBuilder( + dnsDiscoveryConf: DnsDiscoveryConfBuilder.init(), + discv5Conf: Discv5ConfBuilder.init(), + filterServiceConf: FilterServiceConfBuilder.init(), + metricsServerConf: MetricsServerConfBuilder.init(), + restServerConf: RestServerConfBuilder.init(), + rlnRelayConf: RlnRelayConfBuilder.init(), + storeServiceConf: StoreServiceConfBuilder.init(), + webSocketConf: WebSocketConfBuilder.init(), + ) + +proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) = + b.clusterConf = some(clusterConf) + +proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) = + b.nodeKey = some(nodeKey) + +proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) = + b.clusterId = some(clusterId) + +proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) = + b.numShardsInNetwork = some(numShardsInNetwork) + +proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) = + b.shards = some(shards) + +proc withProtectedShards*( + b: var WakuConfBuilder, protectedShards: seq[ProtectedShard] +) = + b.protectedShards = some(protectedShards) + +proc withContentTopics*(b: var WakuConfBuilder, contentTopics: seq[string]) = + b.contentTopics = some(contentTopics) + +proc withRelay*(b: var WakuConfBuilder, relay: bool) = + b.relay = some(relay) + +proc withLightPush*(b: var WakuConfBuilder, lightPush: bool) = + b.lightPush = some(lightPush) + +proc withStoreSync*(b: var WakuConfBuilder, storeSync: bool) = + b.storeSync = some(storeSync) + +proc 
withPeerExchange*(b: var WakuConfBuilder, peerExchange: bool) = + b.peerExchange = some(peerExchange) + +proc withRelayPeerExchange*(b: var WakuConfBuilder, relayPeerExchange: bool) = + b.relayPeerExchange = some(relayPeerExchange) + +proc withRendezvous*(b: var WakuConfBuilder, rendezvous: bool) = + b.rendezvous = some(rendezvous) + +proc withRemoteStoreNode*(b: var WakuConfBuilder, remoteStoreNode: string) = + b.remoteStoreNode = some(remoteStoreNode) + +proc withRemoteLightPushNode*(b: var WakuConfBuilder, remoteLightPushNode: string) = + b.remoteLightPushNode = some(remoteLightPushNode) + +proc withRemoteFilterNode*(b: var WakuConfBuilder, remoteFilterNode: string) = + b.remoteFilterNode = some(remoteFilterNode) + +proc withRemotePeerExchangeNode*( + b: var WakuConfBuilder, remotePeerExchangeNode: string +) = + b.remotePeerExchangeNode = some(remotePeerExchangeNode) + +proc withDnsAddrs*(b: var WakuConfBuilder, dnsAddrs: bool) = + b.dnsAddrs = some(dnsAddrs) + +proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) = + b.peerPersistence = some(peerPersistence) + +proc withPeerStoreCapacity*(b: var WakuConfBuilder, peerStoreCapacity: int) = + b.peerStoreCapacity = some(peerStoreCapacity) + +proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) = + b.maxConnections = some(maxConnections) + +proc withDnsAddrsNameServers*( + b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress] +) = + b.dnsAddrsNameServers = concat(b.dnsAddrsNameServers, dnsAddrsNameServers) + +proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) = + b.logLevel = some(logLevel) + +proc withLogFormat*(b: var WakuConfBuilder, logFormat: logging.LogFormat) = + b.logFormat = some(logFormat) + +proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: Port) = + b.p2pTcpPort = some(p2pTcpPort) + +proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: uint16) = + b.p2pTcpPort = some(Port(p2pTcpPort)) + +proc withPortsShift*(b: var WakuConfBuilder, portsShift: uint16) = + b.portsShift = some(portsShift) + +proc withP2pListenAddress*(b: var WakuConfBuilder, p2pListenAddress: IpAddress) = + b.p2pListenAddress = some(p2pListenAddress) + +proc withExtMultiAddrsOnly*(b: var WakuConfBuilder, extMultiAddrsOnly: bool) = + b.extMultiAddrsOnly = some(extMultiAddrsOnly) + +proc withDns4DomainName*(b: var WakuConfBuilder, dns4DomainName: string) = + b.dns4DomainName = some(dns4DomainName) + +proc withNatStrategy*(b: var WakuConfBuilder, natStrategy: string) = + b.natStrategy = some(natStrategy) + +proc withAgentString*(b: var WakuConfBuilder, agentString: string) = + b.agentString = some(agentString) + +proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) = + b.colocationLimit = some(colocationLimit) + +proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) = + b.rateLimits = some(rateLimits) + +proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) = + b.maxRelayPeers = some(maxRelayPeers) + +proc withRelayServiceRatio*(b: var WakuConfBuilder, relayServiceRatio: string) = + b.relayServiceRatio = some(relayServiceRatio) + +proc withCircuitRelayClient*(b: var WakuConfBuilder, circuitRelayClient: bool) = + b.circuitRelayClient = some(circuitRelayClient) + +proc withRelayShardedPeerManagement*( + b: var WakuConfBuilder, relayShardedPeerManagement: bool +) = + b.relayShardedPeerManagement = some(relayShardedPeerManagement) + +proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) = + b.keepAlive = some(keepAlive) + +proc 
withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) = + b.p2pReliability = some(p2pReliability) + +proc withExtMultiAddrs*(builder: var WakuConfBuilder, extMultiAddrs: seq[string]) = + builder.extMultiAddrs = concat(builder.extMultiAddrs, extMultiAddrs) + +proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSizeBytes: uint64) = + builder.maxMessageSize = MaxMessageSize(kind: mmskInt, bytes: maxMessageSizeBytes) + +proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) = + builder.maxMessageSize = MaxMessageSize(kind: mmskStr, str: maxMessageSize) + +proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) = + builder.staticNodes = concat(builder.staticNodes, staticNodes) + +proc nodeKey( + builder: WakuConfBuilder, rng: ref HmacDrbgContext +): Result[crypto.PrivateKey, string] = + if builder.nodeKey.isSome(): + return ok(builder.nodeKey.get()) + else: + warn "missing node key, generating new set" + let nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr: + error "Failed to generate key", error = error + return err("Failed to generate key: " & $error) + return ok(nodeKey) + +proc applyClusterConf(builder: var WakuConfBuilder) = + # Apply cluster conf, overrides most values passed individually + # If you want to tweak values, don't use clusterConf + if builder.clusterConf.isNone: + return + let clusterConf = builder.clusterConf.get() + + if builder.clusterId.isSome(): + warn "Cluster id was provided alongside a cluster conf", + used = clusterConf.clusterId, discarded = builder.clusterId.get() + builder.clusterId = some(clusterConf.clusterId) + + # Apply relay parameters + if builder.relay.get(false) and clusterConf.rlnRelay: + if builder.rlnRelayConf.enabled.isSome(): + warn "RLN Relay was provided alongside a cluster conf", + used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled + builder.rlnRelayConf.withEnabled(true) + + if builder.rlnRelayConf.ethContractAddress.get("") != "": + warn "RLN Relay ETH Contract Address was provided alongside a cluster conf", + used = clusterConf.rlnRelayEthContractAddress.string, + discarded = builder.rlnRelayConf.ethContractAddress.get().string + builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress) + + if builder.rlnRelayConf.chainId.isSome(): + warn "RLN Relay Chain Id was provided alongside a cluster conf", + used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId + builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId) + + if builder.rlnRelayConf.dynamic.isSome(): + warn "RLN Relay Dynamic was provided alongside a cluster conf", + used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic + builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic) + + if builder.rlnRelayConf.epochSizeSec.isSome(): + warn "RLN Epoch Size in Seconds was provided alongside a cluster conf", + used = clusterConf.rlnEpochSizeSec, + discarded = builder.rlnRelayConf.epochSizeSec + builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec) + + if builder.rlnRelayConf.userMessageLimit.isSome(): + warn "RLN Relay Dynamic was provided alongside a cluster conf", + used = clusterConf.rlnRelayUserMessageLimit, + discarded = builder.rlnRelayConf.userMessageLimit + builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit) + # End Apply relay parameters + + case builder.maxMessageSize.kind + of mmskNone: + discard + of mmskStr, mmskInt: + warn "Max Message Size was provided alongside 
a cluster conf", + used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize + builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize)) + + if builder.numShardsInNetwork.isSome(): + warn "Num Shards In Network was provided alongside a cluster conf", + used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork + builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork) + + if clusterConf.discv5Discovery: + if builder.discv5Conf.enabled.isNone: + builder.discv5Conf.withEnabled(clusterConf.discv5Discovery) + + if builder.discv5Conf.bootstrapNodes.len == 0 and + clusterConf.discv5BootstrapNodes.len > 0: + warn "Discv5 Boostrap nodes were provided alongside a cluster conf", + used = clusterConf.discv5BootstrapNodes, + discarded = builder.discv5Conf.bootstrapNodes + builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes) + +proc build*( + builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng() +): Result[WakuConf, string] = + ## Return a WakuConf that contains all mandatory parameters + ## Applies some sane defaults that are applicable across any usage + ## of libwaku. It aims to be agnostic so it does not apply a + ## default when it is opinionated. + + applyClusterConf(builder) + + let relay = + if builder.relay.isSome(): + builder.relay.get() + else: + warn "whether to mount relay is not specified, defaulting to not mounting" + false + + let lightPush = + if builder.lightPush.isSome(): + builder.lightPush.get() + else: + warn "whether to mount lightPush is not specified, defaulting to not mounting" + false + + let peerExchange = + if builder.peerExchange.isSome(): + builder.peerExchange.get() + else: + warn "whether to mount peerExchange is not specified, defaulting to not mounting" + false + + let storeSync = + if builder.storeSync.isSome(): + builder.storeSync.get() + else: + warn "whether to mount storeSync is not specified, defaulting to not mounting" + false + + let rendezvous = + if builder.rendezvous.isSome(): + builder.rendezvous.get() + else: + warn "whether to mount rendezvous is not specified, defaulting to not mounting" + false + + let relayPeerExchange = builder.relayPeerExchange.get(false) + + let nodeKey = ?nodeKey(builder, rng) + + let clusterId = + if builder.clusterId.isNone(): + # TODO: ClusterId should never be defaulted, instead, presets + # should be defined and used + warn("Cluster Id was not specified, defaulting to 0") + 0.uint16 + else: + builder.clusterId.get() + + let numShardsInNetwork = + if builder.numShardsInNetwork.isSome(): + builder.numShardsInNetwork.get() + else: + warn "Number of shards in network not specified, defaulting to zero (improve is wip)" + 0 + + let shards = + if builder.shards.isSome(): + builder.shards.get() + else: + warn "shards not specified, defaulting to all shards in network" + # TODO: conversion should not be needed + let upperShard: uint16 = uint16(numShardsInNetwork - 1) + toSeq(0.uint16 .. 
upperShard) + + let protectedShards = builder.protectedShards.get(@[]) + + let maxMessageSizeBytes = + case builder.maxMessageSize.kind + of mmskInt: + builder.maxMessageSize.bytes + of mmskStr: + ?parseMsgSize(builder.maxMessageSize.str) + else: + warn "Max Message Size not specified, defaulting to 150KiB" + parseCorrectMsgSize("150KiB") + + let contentTopics = builder.contentTopics.get(@[]) + + # Build sub-configs + let discv5Conf = builder.discv5Conf.build().valueOr: + return err("Discv5 Conf building failed: " & $error) + + let dnsDiscoveryConf = builder.dnsDiscoveryConf.build().valueOr: + return err("DNS Discovery Conf building failed: " & $error) + + let filterServiceConf = builder.filterServiceConf.build().valueOr: + return err("Filter Service Conf building failed: " & $error) + + let metricsServerConf = builder.metricsServerConf.build().valueOr: + return err("Metrics Server Conf building failed: " & $error) + + let restServerConf = builder.restServerConf.build().valueOr: + return err("REST Server Conf building failed: " & $error) + + let rlnRelayConf = builder.rlnRelayConf.build().valueOr: + return err("RLN Relay Conf building failed: " & $error) + + let storeServiceConf = builder.storeServiceConf.build().valueOr: + return err("Store Conf building failed: " & $error) + + let webSocketConf = builder.webSocketConf.build().valueOr: + return err("WebSocket Conf building failed: " & $error) + # End - Build sub-configs + + let logLevel = + if builder.logLevel.isSome(): + builder.logLevel.get() + else: + warn "Log Level not specified, defaulting to INFO" + logging.LogLevel.INFO + + let logFormat = + if builder.logFormat.isSome(): + builder.logFormat.get() + else: + warn "Log Format not specified, defaulting to TEXT" + logging.LogFormat.TEXT + + let natStrategy = + if builder.natStrategy.isSome(): + builder.natStrategy.get() + else: + warn "Nat Strategy is not specified, defaulting to none" + "none" + + let p2pTcpPort = + if builder.p2pTcpPort.isSome(): + builder.p2pTcpPort.get() + else: + warn "P2P Listening TCP Port is not specified, listening on 60000" + 60000.Port + + let p2pListenAddress = + if builder.p2pListenAddress.isSome(): + builder.p2pListenAddress.get() + else: + warn "P2P listening address not specified, listening on 0.0.0.0" + (static parseIpAddress("0.0.0.0")) + + let portsShift = + if builder.portsShift.isSome(): + builder.portsShift.get() + else: + warn "Ports Shift is not specified, defaulting to 0" + 0.uint16 + + let dns4DomainName = + if builder.dns4DomainName.isSome(): + let d = builder.dns4DomainName.get() + if d.string != "": + some(d) + else: + none(string) + else: + none(string) + + var extMultiAddrs: seq[MultiAddress] = @[] + for s in builder.extMultiAddrs: + let m = MultiAddress.init(s).valueOr: + return err("Invalid multiaddress provided: " & s) + extMultiAddrs.add(m) + + let extMultiAddrsOnly = + if builder.extMultiAddrsOnly.isSome(): + builder.extMultiAddrsOnly.get() + else: + warn "Whether to only announce external multiaddresses is not specified, defaulting to false" + false + + let dnsAddrs = + if builder.dnsAddrs.isSome(): + builder.dnsAddrs.get() + else: + warn "Whether to resolve DNS multiaddresses was not specified, defaulting to false." + false + + let dnsAddrsNameServers = + if builder.dnsAddrsNameServers.len != 0: + builder.dnsAddrsNameServers + else: + warn "DNS name servers IPs not provided, defaulting to Cloudflare's." 
+ @[static parseIpAddress("1.1.1.1"), static parseIpAddress("1.0.0.1")] + + let peerPersistence = + if builder.peerPersistence.isSome(): + builder.peerPersistence.get() + else: + warn "Peer persistence not specified, defaulting to false" + false + + let maxConnections = + if builder.maxConnections.isSome(): + builder.maxConnections.get() + else: + warn "Max Connections was not specified, defaulting to 300" + 300 + + # TODO: Do the git version thing here + let agentString = builder.agentString.get("nwaku") + + # TODO: use `DefaultColocationLimit`. the user of this value should + # probably be defining a config object + let colocationLimit = builder.colocationLimit.get(5) + let rateLimits = builder.rateLimits.get(newSeq[string](0)) + + # TODO: is there a strategy for experimental features? delete vs promote + let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false) + + let wakuFlags = CapabilitiesBitfield.init( + lightpush = lightPush, + filter = filterServiceConf.isSome, + store = storeServiceConf.isSome, + relay = relay, + sync = storeServiceConf.isSome() and storeServiceConf.get().storeSyncConf.isSome, + ) + + let wakuConf = WakuConf( + # confs + storeServiceConf: storeServiceConf, + filterServiceConf: filterServiceConf, + discv5Conf: discv5Conf, + rlnRelayConf: rlnRelayConf, + metricsServerConf: metricsServerConf, + restServerConf: restServerConf, + dnsDiscoveryConf: dnsDiscoveryConf, + # end confs + nodeKey: nodeKey, + clusterId: clusterId, + numShardsInNetwork: numShardsInNetwork, + contentTopics: contentTopics, + shards: shards, + protectedShards: protectedShards, + relay: relay, + lightPush: lightPush, + peerExchange: peerExchange, + rendezvous: rendezvous, + remoteStoreNode: builder.remoteStoreNode, + remoteLightPushNode: builder.remoteLightPushNode, + remoteFilterNode: builder.remoteFilterNode, + remotePeerExchangeNode: builder.remotePeerExchangeNode, + relayPeerExchange: relayPeerExchange, + maxMessageSizeBytes: maxMessageSizeBytes, + logLevel: logLevel, + logFormat: logFormat, + # TODO: Separate builders + networkConf: NetworkConfig( + natStrategy: natStrategy, + p2pTcpPort: p2pTcpPort, + dns4DomainName: dns4DomainName, + p2pListenAddress: p2pListenAddress, + extMultiAddrs: extMultiAddrs, + extMultiAddrsOnly: extMultiAddrsOnly, + ), + portsShift: portsShift, + webSocketConf: webSocketConf, + dnsAddrs: dnsAddrs, + dnsAddrsNameServers: dnsAddrsNameServers, + peerPersistence: peerPersistence, + peerStoreCapacity: builder.peerStoreCapacity, + maxConnections: maxConnections, + agentString: agentString, + colocationLimit: colocationLimit, + maxRelayPeers: builder.maxRelayPeers, + relayServiceRatio: builder.relayServiceRatio.get("60:40"), + rateLimits: rateLimits, + circuitRelayClient: builder.circuitRelayClient.get(false), + keepAlive: builder.keepAlive.get(true), + staticNodes: builder.staticNodes, + relayShardedPeerManagement: relayShardedPeerManagement, + p2pReliability: builder.p2pReliability.get(false), + wakuFlags: wakuFlags, + ) + + ?wakuConf.validate() + + return ok(wakuConf) diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim new file mode 100644 index 000000000..b091e2d1e --- /dev/null +++ b/waku/factory/conf_builder/web_socket_conf_builder.nim @@ -0,0 +1,68 @@ +import chronicles, std/[net, options], results +import ../network_conf + +logScope: + topics = "waku conf builder websocket" + +############################## +## WebSocket Config Builder ## +############################## 
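#
# Editorial aside: a minimal usage sketch of this builder, not part of the
# patch itself. The key/cert paths are illustrative assumptions; when secure
# mode is enabled, build() errors out unless both paths are provided, and it
# returns none(WebSocketConf) whenever the builder was not enabled.
#
var wsBuilder = WebSocketConfBuilder.init()
wsBuilder.withEnabled(true)
wsBuilder.withWebSocketPort(8000'u16)
wsBuilder.withSecureEnabled(true)
wsBuilder.withKeyPath("/path/to/ws.key") # illustrative path
wsBuilder.withCertPath("/path/to/ws.crt") # illustrative path
let wsConf = wsBuilder.build().valueOr:
  quit("failed to build WebSocket conf: " & error)
# wsConf is an Option[WebSocketConf]; isSome() here because enabled was set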
+type WebSocketConfBuilder* = object + enabled*: Option[bool] + webSocketPort*: Option[Port] + secureEnabled*: Option[bool] + keyPath*: Option[string] + certPath*: Option[string] + +proc init*(T: type WebSocketConfBuilder): WebSocketConfBuilder = + WebSocketConfBuilder() + +proc withEnabled*(b: var WebSocketConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withSecureEnabled*(b: var WebSocketConfBuilder, secureEnabled: bool) = + b.secureEnabled = some(secureEnabled) + +proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: Port) = + b.webSocketPort = some(webSocketPort) + +proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: uint16) = + b.webSocketPort = some(Port(webSocketPort)) + +proc withKeyPath*(b: var WebSocketConfBuilder, keyPath: string) = + b.keyPath = some(keyPath) + +proc withCertPath*(b: var WebSocketConfBuilder, certPath: string) = + b.certPath = some(certPath) + +proc build*(b: WebSocketConfBuilder): Result[Option[WebSocketConf], string] = + if not b.enabled.get(false): + return ok(none(WebSocketConf)) + + if b.webSocketPort.isNone(): + return err("websocket.port is not specified") + + if not b.secureEnabled.get(false): + return ok( + some( + WebSocketConf( + port: b.websocketPort.get(), secureConf: none(WebSocketSecureConf) + ) + ) + ) + + if b.keyPath.get("") == "": + return err("WebSocketSecure enabled but key path is not specified") + if b.certPath.get("") == "": + return err("WebSocketSecure enabled but cert path is not specified") + + return ok( + some( + WebSocketConf( + port: b.webSocketPort.get(), + secureConf: some( + WebSocketSecureConf(keyPath: b.keyPath.get(), certPath: b.certPath.get()) + ), + ) + ) + ) diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index 41fc25582..76b52b20b 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -1,6 +1,7 @@ import std/[strutils, strformat], results, + chronicles, chronos, regex, confutils, @@ -14,17 +15,26 @@ import nimcrypto/utils, secp256k1, json + import + ./waku_conf, + ./conf_builder/conf_builder, + ./networks_config, ../common/confutils/envvar/defs as confEnvvarDefs, ../common/confutils/envvar/std/net as confEnvvarNet, ../common/logging, ../waku_enr, ../node/peer_manager, - ../waku_core/topics/pubsub_topic + ../waku_core/topics/pubsub_topic, + ../../tools/rln_keystore_generator/rln_keystore_generator, + ../../tools/rln_db_inspector/rln_db_inspector include ../waku_core/message/default_values -export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet +export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet, ProtectedShard + +logScope: + topics = "waku external config" # Git version in git describe format (defined at compile time) const git_version* {.strdefine.} = "n/a" @@ -33,10 +43,6 @@ type ConfResult*[T] = Result[T, string] type EthRpcUrl* = distinct string -type ProtectedShard* = object - shard*: uint16 - key*: secp256k1.SkPublicKey - type StartUpCommand* = enum noCommand # default, runs waku generateRlnKeystore # generates a new RLN keystore @@ -148,7 +154,7 @@ type WakuNodeConf* = object ## General node config preset* {. desc: - "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1).", + "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.", defaultValue: "", name: "preset" .}: string @@ -196,7 +202,7 @@ type WakuNodeConf* = object .}: seq[string] extMultiAddrsOnly* {. 
- desc: "Only announce external multiaddresses", + desc: "Only announce external multiaddresses setup with --ext-multiaddr", defaultValue: false, name: "ext-multiaddr-only" .}: bool @@ -300,31 +306,12 @@ hence would have reachability issues.""", name: "rln-relay-dynamic" .}: bool - rlnRelayIdKey* {. - desc: "Rln relay identity secret key as a Hex string", - defaultValue: "", - name: "rln-relay-id-key" - .}: string - - rlnRelayIdCommitmentKey* {. - desc: "Rln relay identity commitment key as a Hex string", - defaultValue: "", - name: "rln-relay-id-commitment-key" - .}: string - rlnRelayTreePath* {. desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)", defaultValue: "", name: "rln-relay-tree-path" .}: string - rlnRelayBandwidthThreshold* {. - desc: - "Message rate in bytes/sec after which verification of proofs should happen.", - defaultValue: 0, # to maintain backwards compatibility - name: "rln-relay-bandwidth-threshold" - .}: int - staticnodes* {. desc: "Peer multiaddr to directly connect with. Argument may be repeated.", name: "staticnode" @@ -372,7 +359,7 @@ hence would have reachability issues.""", .}: bool legacyStore* {. - desc: "Enable/disable waku store legacy mode", + desc: "Enable/disable support of Waku Store v2 as a service", defaultValue: true, name: "legacy-store" .}: bool @@ -432,28 +419,20 @@ hence would have reachability issues.""", desc: "Interval between store sync attempts. In seconds.", defaultValue: 300, # 5 minutes name: "store-sync-interval" - .}: int64 + .}: uint32 storeSyncRange* {. desc: "Amount of time to sync. In seconds.", defaultValue: 3600, # 1 hours name: "store-sync-range" - .}: int64 + .}: uint32 storeSyncRelayJitter* {. hidden, desc: "Time offset to account for message propagation jitter. In seconds.", defaultValue: 20, name: "store-sync-relay-jitter" - .}: int64 - - storeSyncMaxPayloadSize* {. - hidden, - desc: - "Max size in bytes of the inner negentropy payload. Cannot be less than 5K, 0 is unlimited.", - defaultValue: 0, - name: "store-sync-max-payload-size" - .}: int64 + .}: uint32 ## Filter config filter* {. @@ -471,7 +450,7 @@ hence would have reachability issues.""", "Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.", defaultValue: 300, # 5 minutes name: "filter-subscription-timeout" - .}: int64 + .}: uint16 filterMaxPeersToServe* {. desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.", @@ -594,9 +573,9 @@ with the drawback of consuming some more bandwidth.""", ## Discovery v5 config discv5Discovery* {. desc: "Enable discovering nodes via Node Discovery v5.", - defaultValue: false, + defaultValue: none(bool), name: "discv5-discovery" - .}: bool + .}: Option[bool] discv5UdpPort* {. desc: "Listening UDP port for Node Discovery v5.", @@ -774,8 +753,7 @@ proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = return @[] proc defaultListenAddress*(): IpAddress = - # TODO: How should we select between IPv4 and IPv6 - # Maybe there should be a config option for this. + # TODO: Should probably listen on both ipv4 and ipv6 by default. 
(static parseIpAddress("0.0.0.0")) proc defaultColocationLimit*(): int = @@ -884,3 +862,188 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] = return ok(conf) except CatchableError: return err("exception in defaultWakuNodeConf: " & getCurrentExceptionMsg()) + +proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf = + RlnKeystoreGeneratorConf( + execute: n.execute, + chainId: n.rlnRelayChainId, + ethClientAddress: n.rlnRelayEthClientAddress.string, + ethContractAddress: n.rlnRelayEthContractAddress, + userMessageLimit: n.rlnRelayUserMessageLimit, + ethPrivateKey: n.rlnRelayEthPrivateKey, + credPath: n.rlnRelayCredPath, + credPassword: n.rlnRelayCredPassword, + ) + +proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf = + return InspectRlnDbConf(treePath: n.treePath) + +proc toClusterConf( + preset: string, clusterId: Option[uint16] +): ConfResult[Option[ClusterConf]] = + var lcPreset = toLowerAscii(preset) + if clusterId.isSome() and clusterId.get() == 1: + warn( + "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead." + ) + lcPreset = "twn" + + case lcPreset + of "": + ok(none(ClusterConf)) + of "twn": + ok(some(ClusterConf.TheWakuNetworkConf())) + else: + err("Invalid --preset value passed: " & lcPreset) + +proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = + var b = WakuConfBuilder.init() + + b.withLogLevel(n.logLevel) + b.withLogFormat(n.logFormat) + + b.rlnRelayConf.withEnabled(n.rlnRelay) + if n.rlnRelayCredPath != "": + b.rlnRelayConf.withCredPath(n.rlnRelayCredPath) + if n.rlnRelayCredPassword != "": + b.rlnRelayConf.withCredPassword(n.rlnRelayCredPassword) + if n.rlnRelayEthClientAddress.string != "": + b.rlnRelayConf.withEthClientAddress(n.rlnRelayEthClientAddress.string) + if n.rlnRelayEthContractAddress != "": + b.rlnRelayConf.withEthContractAddress(n.rlnRelayEthContractAddress) + + if n.rlnRelayChainId != 0: + b.rlnRelayConf.withChainId(n.rlnRelayChainId) + b.rlnRelayConf.withUserMessageLimit(n.rlnRelayUserMessageLimit) + b.rlnRelayConf.withEpochSizeSec(n.rlnEpochSizeSec) + + if n.rlnRelayCredIndex.isSome(): + b.rlnRelayConf.withCredIndex(n.rlnRelayCredIndex.get()) + b.rlnRelayConf.withDynamic(n.rlnRelayDynamic) + + b.rlnRelayConf.withTreePath(n.rlnRelayTreePath) + + if n.maxMessageSize != "": + b.withMaxMessageSize(n.maxMessageSize) + + b.withProtectedShards(n.protectedShards) + b.withClusterId(n.clusterId) + + let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr: + return err("Error determining cluster from preset: " & $error) + + if clusterConf.isSome(): + b.withClusterConf(clusterConf.get()) + + b.withAgentString(n.agentString) + + if n.nodeKey.isSome(): + b.withNodeKey(n.nodeKey.get()) + + b.withP2pListenAddress(n.listenAddress) + b.withP2pTcpPort(n.tcpPort) + b.withPortsShift(n.portsShift) + b.withNatStrategy(n.nat) + b.withExtMultiAddrs(n.extMultiAddrs) + b.withExtMultiAddrsOnly(n.extMultiAddrsOnly) + b.withMaxConnections(n.maxConnections) + + if n.maxRelayPeers.isSome(): + b.withMaxRelayPeers(n.maxRelayPeers.get()) + + if n.relayServiceRatio != "": + b.withRelayServiceRatio(n.relayServiceRatio) + b.withColocationLimit(n.colocationLimit) + + if n.peerStoreCapacity.isSome: + b.withPeerStoreCapacity(n.peerStoreCapacity.get()) + + b.withPeerPersistence(n.peerPersistence) + b.withDnsAddrs(n.dnsAddrs) + b.withDnsAddrsNameServers(n.dnsAddrsNameServers) + b.withDns4DomainName(n.dns4DomainName) + b.withCircuitRelayClient(n.isRelayClient) + 
b.withRelay(n.relay) + b.withRelayPeerExchange(n.relayPeerExchange) + b.withRelayShardedPeerManagement(n.relayShardedPeerManagement) + b.withStaticNodes(n.staticNodes) + b.withKeepAlive(n.keepAlive) + + if n.numShardsInNetwork != 0: + b.withNumShardsInNetwork(n.numShardsInNetwork) + + b.withShards(n.shards) + b.withContentTopics(n.contentTopics) + + b.storeServiceConf.withEnabled(n.store) + b.storeServiceConf.withSupportV2(n.legacyStore) + b.storeServiceConf.withRetentionPolicy(n.storeMessageRetentionPolicy) + b.storeServiceConf.withDbUrl(n.storeMessageDbUrl) + b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum) + b.storeServiceConf.withDbMigration(n.storeMessageDbMigration) + b.storeServiceConf.withMaxNumDbConnections(n.storeMaxNumDbConnections) + b.storeServiceConf.withResume(n.storeResume) + + # TODO: can we just use `Option` on the CLI? + if n.storenode != "": + b.withRemoteStoreNode(n.storenode) + if n.filternode != "": + b.withRemoteFilterNode(n.filternode) + if n.lightpushnode != "": + b.withRemoteLightPushNode(n.lightpushnode) + if n.peerExchangeNode != "": + b.withRemotePeerExchangeNode(n.peerExchangeNode) + + b.storeServiceConf.storeSyncConf.withEnabled(n.storeSync) + b.storeServiceConf.storeSyncConf.withIntervalSec(n.storeSyncInterval) + b.storeServiceConf.storeSyncConf.withRangeSec(n.storeSyncRange) + b.storeServiceConf.storeSyncConf.withRelayJitterSec(n.storeSyncRelayJitter) + + b.filterServiceConf.withEnabled(n.filter) + b.filterServiceConf.withSubscriptionTimeout(n.filterSubscriptionTimeout) + b.filterServiceConf.withMaxPeersToServe(n.filterMaxPeersToServe) + b.filterServiceConf.withMaxCriteria(n.filterMaxCriteria) + + b.withLightPush(n.lightpush) + b.withP2pReliability(n.reliabilityEnabled) + + b.restServerConf.withEnabled(n.rest) + b.restServerConf.withListenAddress(n.restAddress) + b.restServerConf.withPort(n.restPort) + b.restServerConf.withRelayCacheCapacity(n.restRelayCacheCapacity) + b.restServerConf.withAdmin(n.restAdmin) + b.restServerConf.withAllowOrigin(n.restAllowOrigin) + + b.metricsServerConf.withEnabled(n.metricsServer) + b.metricsServerConf.withHttpAddress(n.metricsServerAddress) + b.metricsServerConf.withHttpPort(n.metricsServerPort) + b.metricsServerConf.withLogging(n.metricsLogging) + + b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery) + b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl) + b.dnsDiscoveryConf.withNameServers(n.dnsDiscoveryNameServers) + + if n.discv5Discovery.isSome(): + b.discv5Conf.withEnabled(n.discv5Discovery.get()) + + b.discv5Conf.withUdpPort(n.discv5UdpPort) + b.discv5Conf.withBootstrapNodes(n.discv5BootstrapNodes) + b.discv5Conf.withEnrAutoUpdate(n.discv5EnrAutoUpdate) + b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit) + b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit) + b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop) + b.discv5Conf.withDiscv5Only(n.discv5Only) + + b.withPeerExchange(n.peerExchange) + + b.withRendezvous(n.rendezvous) + + b.webSocketConf.withEnabled(n.websocketSupport) + b.webSocketConf.withWebSocketPort(n.websocketPort) + b.webSocketConf.withSecureEnabled(n.websocketSecureSupport) + b.webSocketConf.withKeyPath(n.websocketSecureKeyPath) + b.webSocketConf.withCertPath(n.websocketSecureCertPath) + + b.withRateLimits(n.rateLimits) + + return b.build() diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim index b5275d00b..72af28340 100644 --- a/waku/factory/internal_config.nim +++ b/waku/factory/internal_config.nim @@ -4,21 +4,20 @@ import libp2p/crypto/crypto, libp2p/multiaddress, 
libp2p/nameresolving/dnsresolver, - std/[options, sequtils, strutils, net], + std/[options, sequtils, net], results import - ./external_config, ../common/utils/nat, - ../node/config, - ../waku_enr/capabilities, + ../node/net_config, ../waku_enr, ../waku_core, - ./networks_config + ./waku_conf, + ./network_conf proc enrConfiguration*( - conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey + conf: WakuConf, netConfig: NetConfig ): Result[enr.Record, string] = - var enrBuilder = EnrBuilder.init(key) + var enrBuilder = EnrBuilder.init(conf.nodeKey) enrBuilder.withIpAddressAndPorts( netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort @@ -44,19 +43,12 @@ proc enrConfiguration*( return ok(record) -proc validateExtMultiAddrs*(vals: seq[string]): Result[seq[MultiAddress], string] = - var multiaddrs: seq[MultiAddress] - for val in vals: - let multiaddr = ?MultiAddress.init(val) - multiaddrs.add(multiaddr) - return ok(multiaddrs) - proc dnsResolve*( - domain: string, conf: WakuNodeConf + domain: string, dnsAddrsNameServers: seq[IpAddress] ): Future[Result[string, string]] {.async.} = # Use conf's DNS servers var nameServers: seq[TransportAddress] - for ip in conf.dnsAddrsNameServers: + for ip in dnsAddrsNameServers: nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 let dnsResolver = DnsResolver.new(nameServers) @@ -69,14 +61,24 @@ proc dnsResolve*( else: return err("Could not resolve IP from DNS: empty response") -proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResult = +# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init +proc networkConfiguration*( + clusterId: uint16, + conf: NetworkConfig, + discv5Conf: Option[Discv5Conf], + webSocketConf: Option[WebSocketConf], + wakuFlags: CapabilitiesBitfield, + dnsAddrsNameServers: seq[IpAddress], + portsShift: uint16, + clientId: string, +): NetConfigResult = ## `udpPort` is only supplied to satisfy underlying APIs but is not ## actually a supported transport for libp2p traffic. let natRes = setupNat( - conf.nat, + conf.natStrategy.string, clientId, - Port(uint16(conf.tcpPort) + conf.portsShift), - Port(uint16(conf.tcpPort) + conf.portsShift), + Port(uint16(conf.p2pTcpPort) + portsShift), + Port(uint16(conf.p2pTcpPort) + portsShift), ) if natRes.isErr(): return err("failed to setup NAT: " & $natRes.error) @@ -84,15 +86,9 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul var (extIp, extTcpPort, _) = natRes.get() let - dns4DomainName = - if conf.dns4DomainName != "": - some(conf.dns4DomainName) - else: - none(string) - discv5UdpPort = - if conf.discv5Discovery: - some(Port(uint16(conf.discv5UdpPort) + conf.portsShift)) + if discv5Conf.isSome(): + some(Port(uint16(discv5Conf.get().udpPort) + portsShift)) else: none(Port) @@ -101,34 +97,15 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul ## extPort as well. The following heuristic assumes that, in absence of ## manual config, the external port is the same as the bind port. 
extPort = - if (extIp.isSome() or dns4DomainName.isSome()) and extTcpPort.isNone(): - some(Port(uint16(conf.tcpPort) + conf.portsShift)) + if (extIp.isSome() or conf.dns4DomainName.isSome()) and extTcpPort.isNone(): + some(Port(uint16(conf.p2pTcpPort) + portsShift)) else: extTcpPort - extMultiAddrs = - if (conf.extMultiAddrs.len > 0): - let extMultiAddrsValidationRes = validateExtMultiAddrs(conf.extMultiAddrs) - if extMultiAddrsValidationRes.isErr(): - return - err("invalid external multiaddress: " & $extMultiAddrsValidationRes.error) - else: - extMultiAddrsValidationRes.get() - else: - @[] - - wakuFlags = CapabilitiesBitfield.init( - lightpush = conf.lightpush, - filter = conf.filter, - store = conf.store, - relay = conf.relay, - sync = conf.storeSync, - ) - # Resolve and use DNS domain IP - if dns4DomainName.isSome() and extIp.isNone(): + if conf.dns4DomainName.isSome() and extIp.isNone(): try: - let dnsRes = waitFor dnsResolve(conf.dns4DomainName, conf) + let dnsRes = waitFor dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers) if dnsRes.isErr(): return err($dnsRes.error) # Pass error down the stack @@ -138,92 +115,38 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul return err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg()) + let (wsEnabled, wsBindPort, wssEnabled) = + if webSocketConf.isSome: + let wsConf = webSocketConf.get() + (true, some(Port(wsConf.port.uint16 + portsShift)), wsConf.secureConf.isSome) + else: + (false, none(Port), false) + # Wrap in none because NetConfig does not have a default constructor # TODO: We could change bindIp in NetConfig to be something less restrictive # than IpAddress, which doesn't allow default construction let netConfigRes = NetConfig.init( - clusterId = conf.clusterId, - bindIp = conf.listenAddress, - bindPort = Port(uint16(conf.tcpPort) + conf.portsShift), + clusterId = clusterId, + bindIp = conf.p2pListenAddress, + bindPort = Port(uint16(conf.p2pTcpPort) + portsShift), extIp = extIp, extPort = extPort, - extMultiAddrs = extMultiAddrs, + extMultiAddrs = conf.extMultiAddrs, extMultiAddrsOnly = conf.extMultiAddrsOnly, - wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), - wsEnabled = conf.websocketSupport, - wssEnabled = conf.websocketSecureSupport, - dns4DomainName = dns4DomainName, + wsBindPort = wsBindPort, + wsEnabled = wsEnabled, + wssEnabled = wssEnabled, + dns4DomainName = conf.dns4DomainName, discv5UdpPort = discv5UdpPort, wakuFlags = some(wakuFlags), - dnsNameServers = conf.dnsAddrsNameServers, ) return netConfigRes -proc applyPresetConfiguration*(srcConf: WakuNodeConf): Result[WakuNodeConf, string] = - var resConf = srcConf - - if resConf.clusterId == 1: - warn( - "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead." 
- ) - resConf.preset = "twn" - - case toLowerAscii(resConf.preset) - of "twn": - let twnClusterConf = ClusterConf.TheWakuNetworkConf() - - # Override configuration - resConf.maxMessageSize = twnClusterConf.maxMessageSize - resConf.clusterId = twnClusterConf.clusterId - resConf.rlnRelay = twnClusterConf.rlnRelay - resConf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress - resConf.rlnRelayChainId = twnClusterConf.rlnRelayChainId - resConf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic - resConf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold - resConf.discv5Discovery = twnClusterConf.discv5Discovery - resConf.discv5BootstrapNodes = - resConf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes - resConf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec - resConf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit - resConf.numShardsInNetwork = twnClusterConf.numShardsInNetwork - - if resConf.relay: - resConf.rlnRelay = twnClusterConf.rlnRelay - else: - discard - - return ok(resConf) - # TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise -proc getNumShardsInNetwork*(conf: WakuNodeConf): uint32 = +proc getNumShardsInNetwork*(conf: WakuConf): uint32 = if conf.numShardsInNetwork != 0: return conf.numShardsInNetwork # If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec # https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding return uint32(MaxShardIndex + 1) - -proc validateShards*(conf: WakuNodeConf): Result[void, string] = - let numShardsInNetwork = getNumShardsInNetwork(conf) - - for shard in conf.shards: - if shard >= numShardsInNetwork: - let msg = - "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " & - $numShardsInNetwork # fmt doesn't work - error "validateShards failed", error = msg - return err(msg) - - return ok() - -proc getNodeKey*( - conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng() -): Result[PrivateKey, string] = - if conf.nodekey.isSome(): - return ok(conf.nodekey.get()) - - warn "missing node key, generating new set" - let key = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr: - error "Failed to generate key", error = error - return err("Failed to generate key: " & $error) - return ok(key) diff --git a/waku/factory/network_conf.nim b/waku/factory/network_conf.nim new file mode 100644 index 000000000..c5179e53a --- /dev/null +++ b/waku/factory/network_conf.nim @@ -0,0 +1,34 @@ +import std/[net, options, strutils] +import libp2p/multiaddress + +type WebSocketSecureConf* {.requiresInit.} = object + keyPath*: string + certPath*: string + +type WebSocketConf* = object + port*: Port + secureConf*: Option[WebSocketSecureConf] + +type NetworkConf* = object + natStrategy*: string # TODO: make enum + p2pTcpPort*: Port + dns4DomainName*: Option[string] + p2pListenAddress*: IpAddress + extMultiAddrs*: seq[MultiAddress] + extMultiAddrsOnly*: bool + webSocketConf*: Option[WebSocketConf] + +proc validateNoEmptyStrings(networkConf: NetworkConf): Result[void, string] = + if networkConf.dns4DomainName.isSome() and + isEmptyOrWhiteSpace(networkConf.dns4DomainName.get().string): + return err("dns4DomainName is an empty string, set it to none(string) instead") + + if networkConf.webSocketConf.isSome() and + networkConf.webSocketConf.get().secureConf.isSome(): + let secureConf = networkConf.webSocketConf.get().secureConf.get() + if isEmptyOrWhiteSpace(secureConf.keyPath): + 
return err("websocket.secureConf.keyPath is an empty string") + if isEmptyOrWhiteSpace(secureConf.certPath): + return err("websocket.secureConf.certPath is an empty string") + + return ok() diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim index 01e4cf19e..aceada3fe 100644 --- a/waku/factory/networks_config.nim +++ b/waku/factory/networks_config.nim @@ -1,15 +1,17 @@ {.push raises: [].} +# TODO: Rename this type to match file name + type ClusterConf* = object - maxMessageSize*: string + maxMessageSize*: string # TODO: static convert to a uint64 clusterId*: uint16 rlnRelay*: bool rlnRelayEthContractAddress*: string rlnRelayChainId*: uint rlnRelayDynamic*: bool - rlnRelayBandwidthThreshold*: int rlnEpochSizeSec*: uint64 rlnRelayUserMessageLimit*: uint64 + # TODO: should be uint16 like the `shards` parameter numShardsInNetwork*: uint32 discv5Discovery*: bool discv5BootstrapNodes*: seq[string] @@ -25,12 +27,10 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf = rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8", rlnRelayDynamic: true, rlnRelayChainId: 11155111, - rlnRelayBandwidthThreshold: 0, rlnEpochSizeSec: 600, rlnRelayUserMessageLimit: 100, numShardsInNetwork: 8, discv5Discovery: true, - # TODO: Why is this part of the conf? eg an edge node would not have this discv5BootstrapNodes: @[ "enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw", diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 78093c6cd..1b8f8e59b 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -10,7 +10,7 @@ import import ./internal_config, - ./external_config, + ./waku_conf, ./builder, ./validator_signed, ../waku_enr/sharding, @@ -35,7 +35,6 @@ import ../node/peer_manager/peer_store/waku_peer_storage, ../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations, ../waku_lightpush_legacy/common, - ../common/utils/parse_size_units, ../common/rate_limit/setting, ../common/databases/dburl @@ -56,10 +55,9 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] = ## Init waku node instance proc initNode( - conf: WakuNodeConf, + conf: WakuConf, netConfig: NetConfig, rng: ref HmacDrbgContext, - nodeKey: crypto.PrivateKey, record: enr.Record, peerStore: Option[WakuPeerStorage], relay: Relay, @@ -86,17 +84,24 @@ proc initNode( else: peerStore.get() + let (secureKey, secureCert) = + if conf.webSocketConf.isSome() and conf.webSocketConf.get().secureConf.isSome(): + let wssConf = conf.webSocketConf.get().secureConf.get() + (some(wssConf.keyPath), some(wssConf.certPath)) + else: + (none(string), none(string)) + # Build waku node instance var builder = WakuNodeBuilder.init() builder.withRng(rng) - builder.withNodeKey(nodekey) + builder.withNodeKey(conf.nodeKey) builder.withRecord(record) builder.withNetworkConfiguration(netConfig) builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity) builder.withSwitchConfiguration( maxConnections = some(conf.maxConnections.int), - secureKey = some(conf.websocketSecureKeyPath), - secureCert = some(conf.websocketSecureCertPath), + secureKey = secureKey, + secureCert = secureCert, nameResolver = dnsResolver, 
sendSignedPeerRecord = conf.relayPeerExchange, # We send our own signed peer record when peer exchange enabled @@ -148,13 +153,13 @@ proc getAutoshards*( return ok(autoshards) proc setupProtocols( - node: WakuNode, conf: WakuNodeConf, nodeKey: crypto.PrivateKey + node: WakuNode, conf: WakuConf ): Future[Result[void, string]] {.async.} = ## Setup configured protocols on an existing Waku v2 node. ## Optionally include persistent message storage. ## No protocols are started yet. - if conf.discv5Only: + if conf.discv5Conf.isSome() and conf.discv5Conf.get().discv5Only: notice "Running node only with Discv5, not mounting additional protocols" return ok() @@ -167,11 +172,12 @@ proc setupProtocols( error "Unrecoverable error occurred", error = msg quit(QuitFailure) - if conf.store: - if conf.legacyStore: + if conf.storeServiceConf.isSome(): + let storeServiceConf = conf.storeServiceConf.get() + if storeServiceConf.supportV2: let archiveDriverRes = waitFor legacy_driver.ArchiveDriver.new( - conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration, - conf.storeMaxNumDbConnections, onFatalErrorAction, + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration, + storeServiceConf.maxNumDbConnections, onFatalErrorAction, ) if archiveDriverRes.isErr(): return err("failed to setup legacy archive driver: " & archiveDriverRes.error) @@ -191,26 +197,26 @@ proc setupProtocols( ## So for now, we need to make sure that when legacy store is enabled and we use sqlite ## that we migrate our db according to legacy store's schema to have the extra field - let engineRes = dburl.getDbEngine(conf.storeMessageDbUrl) + let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl) if engineRes.isErr(): return err("error getting db engine in setupProtocols: " & engineRes.error) let engine = engineRes.get() let migrate = - if engine == "sqlite" and conf.legacyStore: + if engine == "sqlite" and storeServiceConf.supportV2: false else: - conf.storeMessageDbMigration + storeServiceConf.dbMigration let archiveDriverRes = waitFor driver.ArchiveDriver.new( - conf.storeMessageDbUrl, conf.storeMessageDbVacuum, migrate, - conf.storeMaxNumDbConnections, onFatalErrorAction, + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate, + storeServiceConf.maxNumDbConnections, onFatalErrorAction, ) if archiveDriverRes.isErr(): return err("failed to setup archive driver: " & archiveDriverRes.error) - let retPolicyRes = policy.RetentionPolicy.new(conf.storeMessageRetentionPolicy) + let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy) if retPolicyRes.isErr(): return err("failed to create retention policy: " & retPolicyRes.error) @@ -218,7 +224,7 @@ proc setupProtocols( if mountArcRes.isErr(): return err("failed to mount waku archive protocol: " & mountArcRes.error) - if conf.legacyStore: + if storeServiceConf.supportV2: # Store legacy setup try: await mountLegacyStore(node, node.rateLimitSettings.getSetting(STOREV2)) @@ -232,17 +238,28 @@ proc setupProtocols( except CatchableError: return err("failed to mount waku store protocol: " & getCurrentExceptionMsg()) + if storeServiceConf.storeSyncConf.isSome(): + let confStoreSync = storeServiceConf.storeSyncConf.get() + + ( + await node.mountStoreSync( + confStoreSync.rangeSec, confStoreSync.intervalSec, + confStoreSync.relayJitterSec, + ) + ).isOkOr: + return err("failed to mount waku store sync protocol: " & $error) + mountStoreClient(node) - if conf.storenode != "": - let storeNode = parsePeerInfo(conf.storenode) + if 
conf.remoteStoreNode.isSome(): + let storeNode = parsePeerInfo(conf.remoteStoreNode.get()) if storeNode.isOk(): node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec) else: return err("failed to set node waku store peer: " & storeNode.error) mountLegacyStoreClient(node) - if conf.storenode != "": - let storeNode = parsePeerInfo(conf.storenode) + if conf.remoteStoreNode.isSome(): + let storeNode = parsePeerInfo(conf.remoteStoreNode.get()) if storeNode.isOk(): node.peerManager.addServicePeer( storeNode.value, legacy_common.WakuLegacyStoreCodec @@ -250,7 +267,7 @@ proc setupProtocols( else: return err("failed to set node waku legacy store peer: " & storeNode.error) - if conf.store and conf.storeResume: + if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume: node.setupStoreResume() # If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork @@ -296,14 +313,14 @@ proc setupProtocols( let shards = confShards & autoShards if conf.relay: - let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr: - return err("failed to parse 'max-num-bytes-msg-size' param: " & $error) - - debug "Setting max message size", num_bytes = parsedMaxMsgSize + debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes ( await mountRelay( - node, shards, peerExchangeHandler = peerExchangeHandler, int(parsedMaxMsgSize) + node, + shards, + peerExchangeHandler = peerExchangeHandler, + int(conf.maxMessageSizeBytes), ) ).isOkOr: return err("failed to mount waku relay protocol: " & $error) @@ -330,18 +347,18 @@ proc setupProtocols( except CatchableError: return err("failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()) - if conf.rlnRelay: + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() let rlnConf = WakuRlnConfig( - rlnRelayDynamic: conf.rlnRelayDynamic, - rlnRelayCredIndex: conf.rlnRelayCredIndex, - rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress, - rlnRelayChainId: conf.rlnRelayChainId, - rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress), - rlnRelayCredPath: conf.rlnRelayCredPath, - rlnRelayCredPassword: conf.rlnRelayCredPassword, - rlnRelayTreePath: conf.rlnRelayTreePath, - rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit, - rlnEpochSizeSec: conf.rlnEpochSizeSec, + dynamic: rlnRelayConf.dynamic, + credIndex: rlnRelayConf.credIndex, + ethContractAddress: rlnRelayConf.ethContractAddress, + chainId: rlnRelayConf.chainId, + ethClientAddress: rlnRelayConf.ethClientAddress, + creds: rlnRelayConf.creds, + treePath: rlnRelayConf.treePath, + userMessageLimit: rlnRelayConf.userMessageLimit, + epochSizeSec: rlnRelayConf.epochSizeSec, onFatalErrorAction: onFatalErrorAction, ) @@ -351,7 +368,7 @@ proc setupProtocols( return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg()) # NOTE Must be mounted after relay - if conf.lightpush: + if conf.lightPush: try: await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) @@ -360,8 +377,8 @@ proc setupProtocols( mountLightPushClient(node) mountLegacyLightPushClient(node) - if conf.lightpushnode != "": - let lightPushNode = parsePeerInfo(conf.lightpushnode) + if conf.remoteLightPushNode.isSome(): + let lightPushNode = parsePeerInfo(conf.remoteLightPushNode.get()) if lightPushNode.isOk(): node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec) 
node.peerManager.addServicePeer(lightPushNode.value, WakuLegacyLightPushCodec) @@ -369,21 +386,22 @@ proc setupProtocols( return err("failed to set node waku lightpush peer: " & lightPushNode.error) # Filter setup. NOTE Must be mounted after relay - if conf.filter: + if conf.filterServiceConf.isSome(): + let confFilter = conf.filterServiceConf.get() try: await mountFilter( node, - subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout), - maxFilterPeers = conf.filterMaxPeersToServe, - maxFilterCriteriaPerPeer = conf.filterMaxCriteria, + subscriptionTimeout = chronos.seconds(confFilter.subscriptionTimeout), + maxFilterPeers = confFilter.maxPeersToServe, + maxFilterCriteriaPerPeer = confFilter.maxCriteria, rateLimitSetting = node.rateLimitSettings.getSetting(FILTER), ) except CatchableError: return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg()) await node.mountFilterClient() - if conf.filternode != "": - let filterNode = parsePeerInfo(conf.filternode) + if conf.remoteFilterNode.isSome(): + let filterNode = parsePeerInfo(conf.remoteFilterNode.get()) if filterNode.isOk(): try: node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec) @@ -394,14 +412,6 @@ proc setupProtocols( else: return err("failed to set node waku filter peer: " & filterNode.error) - if conf.storeSync: - ( - await node.mountStoreSync( - conf.storeSyncRange, conf.storeSyncInterval, conf.storeSyncRelayJitter - ) - ).isOkOr: - return err("failed to mount waku store sync protocol: " & $error) - # waku peer exchange setup if conf.peerExchange: try: @@ -412,8 +422,8 @@ proc setupProtocols( return err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg()) - if conf.peerExchangeNode != "": - let peerExchangeNode = parsePeerInfo(conf.peerExchangeNode) + if conf.remotePeerExchangeNode.isSome(): + let peerExchangeNode = parsePeerInfo(conf.remotePeerExchangeNode.get()) if peerExchangeNode.isOk(): node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec) else: @@ -425,7 +435,7 @@ proc setupProtocols( ## Start node proc startNode*( - node: WakuNode, conf: WakuNodeConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[] + node: WakuNode, conf: WakuConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[] ): Future[Result[void, string]] {.async: (raises: []).} = ## Start a configured node and all mounted protocols. ## Connect to static nodes and start @@ -438,9 +448,9 @@ proc startNode*( return err("failed to start waku node: " & getCurrentExceptionMsg()) # Connect to configured static nodes - if conf.staticnodes.len > 0: + if conf.staticNodes.len > 0: try: - await connectToNodes(node, conf.staticnodes, "static") + await connectToNodes(node, conf.staticNodes, "static") except CatchableError: return err("failed to connect to static nodes: " & getCurrentExceptionMsg()) @@ -453,16 +463,18 @@ proc startNode*( err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()) # retrieve px peers and add the to the peer store - if conf.peerExchangeNode != "": + if conf.remotePeerExchangeNode.isSome(): var desiredOutDegree = DefaultPXNumPeersReq if not node.wakuRelay.isNil() and node.wakuRelay.parameters.d.uint64() > 0: desiredOutDegree = node.wakuRelay.parameters.d.uint64() (await node.fetchPeerExchangePeers(desiredOutDegree)).isOkOr: error "error while fetching peers from peer exchange", error = error + # TODO: behavior described by comment is undesired. PX as client should be used in tandem with discv5. 
+ # # Use px to periodically get peers if discv5 is disabled, as discv5 nodes have their own # periodic loop to find peers and px returned peers actually come from discv5 - if conf.peerExchange and not conf.discv5Discovery: + if conf.peerExchange and not conf.discv5Conf.isSome(): node.startPeerExchangeLoop() # Start keepalive, if enabled @@ -476,27 +488,21 @@ proc startNode*( return ok() proc setupNode*( - conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay + wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay ): Result[WakuNode, string] = - # Use provided key only if corresponding rng is also provided - let key = - if conf.nodeKey.isSome(): - conf.nodeKey.get() - else: - warn "missing key, generating new" - crypto.PrivateKey.random(Secp256k1, rng[]).valueOr: - error "Failed to generate key", error = error - return err("Failed to generate key: " & $error) - - let netConfig = networkConfiguration(conf, clientId).valueOr: + let netConfig = networkConfiguration( + wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf, + wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers, + wakuConf.portsShift, clientId, + ).valueOr: error "failed to create internal config", error = error return err("failed to create internal config: " & error) - let record = enrConfiguration(conf, netConfig, key).valueOr: + let record = enrConfiguration(wakuConf, netConfig).valueOr: error "failed to create record", error = error return err("failed to create record: " & error) - if isClusterMismatched(record, conf.clusterId): + if isClusterMismatched(record, wakuConf.clusterId): error "cluster id mismatch configured shards" return err("cluster id mismatch configured shards") @@ -504,21 +510,21 @@ proc setupNode*( ## Peer persistence var peerStore: Option[WakuPeerStorage] - if conf.peerPersistence: + if wakuConf.peerPersistence: peerStore = setupPeerStorage().valueOr: error "Setting up storage failed", error = "failed to setup peer store " & error return err("Setting up storage failed: " & error) debug "Initializing node" - let node = initNode(conf, netConfig, rng, key, record, peerStore, relay).valueOr: + let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr: error "Initializing node failed", error = error return err("Initializing node failed: " & error) debug "Mounting protocols" try: - (waitFor node.setupProtocols(conf, key)).isOkOr: + (waitFor node.setupProtocols(wakuConf)).isOkOr: error "Mounting protocols failed", error = error return err("Mounting protocols failed: " & error) except CatchableError: diff --git a/waku/factory/validator_signed.nim b/waku/factory/validator_signed.nim index 59ee384b1..0da380ab5 100644 --- a/waku/factory/validator_signed.nim +++ b/waku/factory/validator_signed.nim @@ -13,7 +13,7 @@ import const MessageWindowInSec = 5 * 60 # +- 5 minutes -import ./external_config, ../waku_relay/protocol, ../waku_core +import ./waku_conf, ../waku_relay/protocol, ../waku_core declarePublicCounter waku_msg_validator_signed_outcome, "number of messages for each validation outcome", ["result"] diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index c40db3b54..55d311963 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -42,7 +42,8 @@ import ../factory/internal_config, ../factory/external_config, ../factory/app_callbacks, - ../waku_enr/multiaddr + ../waku_enr/multiaddr, + ./waku_conf logScope: topics = "wakunode waku" @@ -52,12 +53,13 @@ const git_version* {.strdefine.} = "n/a" type Waku* 
= ref object version: string - conf: WakuNodeConf - rng: ref HmacDrbgContext + conf*: WakuConf + rng*: ref HmacDrbgContext + key: crypto.PrivateKey wakuDiscv5*: WakuDiscoveryV5 - dynamicBootstrapNodes: seq[RemotePeerInfo] + dynamicBootstrapNodes*: seq[RemotePeerInfo] dnsRetryLoopHandle: Future[void] networkConnLoopHandle: Future[void] discoveryMngr: DiscoveryManager @@ -70,37 +72,11 @@ type Waku* = ref object metricsServer*: MetricsHttpServerRef appCallbacks*: AppCallbacks -proc logConfig(conf: WakuNodeConf) = - info "Configuration: Enabled protocols", - relay = conf.relay, - rlnRelay = conf.rlnRelay, - store = conf.store, - filter = conf.filter, - lightpush = conf.lightpush, - peerExchange = conf.peerExchange - - info "Configuration. Network", cluster = conf.clusterId - - for shard in conf.shards: - info "Configuration. Shards", shard = shard - - for i in conf.discv5BootstrapNodes: - info "Configuration. Bootstrap nodes", node = i - - if conf.rlnRelay and conf.rlnRelayDynamic: - info "Configuration. Validation", - mechanism = "onchain rln", - contract = conf.rlnRelayEthContractAddress, - maxMessageSize = conf.maxMessageSize, - rlnEpochSizeSec = conf.rlnEpochSizeSec, - rlnRelayUserMessageLimit = conf.rlnRelayUserMessageLimit, - rlnRelayEthClientAddress = string(conf.rlnRelayEthClientAddress) - func version*(waku: Waku): string = waku.version proc setupSwitchServices( - waku: Waku, conf: WakuNodeConf, circuitRelay: Relay, rng: ref HmacDrbgContext + waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext ) = proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = debug "circuit relay handler new reserve event", @@ -116,7 +92,7 @@ proc setupSwitchServices( error "failed to update announced multiaddress", error = $error let autonatService = getAutonatService(rng) - if conf.isRelayClient: + if conf.circuitRelayClient: ## The node is considered to be behind a NAT or firewall and then it ## should struggle to be reachable and establish connections to other nodes const MaxNumRelayServers = 2 @@ -131,12 +107,13 @@ proc setupSwitchServices( ## Initialisation proc newCircuitRelay(isRelayClient: bool): Relay = + # TODO: Does it mean it's a circuit-relay server when it's false? 
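+  # Presumably yes: in nim-libp2p, `RelayClient.new()` builds the circuit-relay client side, while `Relay.new()` builds the relay service that can grant reservations to other peers.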
if isRelayClient: return RelayClient.new() return Relay.new() proc setupAppCallbacks( - node: WakuNode, conf: WakuNodeConf, appCallbacks: AppCallbacks + node: WakuNode, conf: WakuConf, appCallbacks: AppCallbacks ): Result[void, string] = if appCallbacks.isNil(): info "No external callbacks to be set" @@ -171,52 +148,36 @@ proc setupAppCallbacks( return ok() proc new*( - T: type Waku, confCopy: var WakuNodeConf, appCallbacks: AppCallbacks = nil + T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil ): Result[Waku, string] = let rng = crypto.newRng() - logging.setupLog(confCopy.logLevel, confCopy.logFormat) + logging.setupLog(wakuConf.logLevel, wakuConf.logFormat) - confCopy = block: - let res = applyPresetConfiguration(confCopy) - if res.isErr(): - error "Failed to complete the config", error = res.error - return err("Failed to complete the config:" & $res.error) - res.get() + ?wakuConf.validate() - logConfig(confCopy) + wakuConf.logConf() info "Running nwaku node", version = git_version - let validateShardsRes = validateShards(confCopy) - if validateShardsRes.isErr(): - error "Failed validating shards", error = $validateShardsRes.error - return err("Failed validating shards: " & $validateShardsRes.error) + var relay = newCircuitRelay(wakuConf.circuitRelayClient) - let keyRes = getNodeKey(confCopy, rng) - if keyRes.isErr(): - error "Failed to generate key", error = $keyRes.error - return err("Failed to generate key: " & $keyRes.error) - confCopy.nodeKey = some(keyRes.get()) - - var relay = newCircuitRelay(confCopy.isRelayClient) - - let nodeRes = setupNode(confCopy, rng, relay) + let nodeRes = setupNode(wakuConf, rng, relay) if nodeRes.isErr(): error "Failed setting up node", error = nodeRes.error return err("Failed setting up node: " & nodeRes.error) let node = nodeRes.get() - node.setupAppCallbacks(confCopy, appCallbacks).isOkOr: + node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr: error "Failed setting up app callbacks", error = error return err("Failed setting up app callbacks: " & $error) ## Delivery Monitor var deliveryMonitor: DeliveryMonitor - if confCopy.reliabilityEnabled: - if confCopy.storenode == "": - return err("A storenode should be set when reliability mode is on") + if wakuConf.p2pReliability: + if wakuConf.remoteStoreNode.isNone(): + return err("A remoteStoreNode should be set when reliability mode is on") let deliveryMonitorRes = DeliveryMonitor.new( node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient, @@ -228,16 +189,15 @@ proc new*( var waku = Waku( version: git_version, - # TODO: WakuNodeConf is re-used for too many context, `conf` here should be a dedicated subtype - conf: confCopy, + conf: wakuConf, rng: rng, - key: confCopy.nodekey.get(), + key: wakuConf.nodeKey, node: node, deliveryMonitor: deliveryMonitor, appCallbacks: appCallbacks, ) - waku.setupSwitchServices(confCopy, relay, rng) + waku.setupSwitchServices(wakuConf, relay, rng) ok(waku) @@ -265,13 +225,16 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] = return err("Could not retrieve ports " & error) if tcpPort.isSome(): - conf.tcpPort = tcpPort.get() + conf.networkConf.p2pTcpPort = tcpPort.get() - if websocketPort.isSome(): - conf.websocketPort = websocketPort.get() + if websocketPort.isSome() and conf.webSocketConf.isSome(): + conf.webSocketConf.get().port = websocketPort.get() # Rebuild NetConfig with bound port values - let netConf = networkConfiguration(conf, clientId).valueOr: + let netConf = networkConfiguration( + conf.clusterId, conf.networkConf, 
conf.discv5Conf, conf.webSocketConf, + conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId, + ).valueOr: return err("Could not update NetConfig: " & error) return ok(netConf) @@ -279,8 +242,7 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] = proc updateEnr(waku: ptr Waku): Result[void, string] = let netConf: NetConfig = getRunningNetConfig(waku).valueOr: return err("error calling updateNetConfig: " & $error) - - let record = enrConfiguration(waku[].conf, netConf, waku[].key).valueOr: + let record = enrConfiguration(waku[].conf, netConf).valueOr: return err("ENR setup failed: " & error) if isClusterMismatched(record, waku[].conf.clusterId): @@ -319,7 +281,9 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] = return ok() proc updateWaku(waku: ptr Waku): Result[void, string] = - if waku[].conf.tcpPort == Port(0) or waku[].conf.websocketPort == Port(0): + let conf = waku[].conf + if conf.networkConf.p2pTcpPort == Port(0) or + (conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)): updateEnr(waku).isOkOr: return err("error calling updateEnr: " & $error) @@ -332,15 +296,17 @@ proc updateWaku(waku: ptr Waku): Result[void, string] = proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = while true: await sleepAsync(30.seconds) - let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers - ) - if dynamicBootstrapNodesRes.isErr(): - error "Retrieving dynamic bootstrap nodes failed", - error = dynamicBootstrapNodesRes.error - continue + if waku.conf.dnsDiscoveryConf.isSome(): + let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() + let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) + if dynamicBootstrapNodesRes.isErr(): + error "Retrieving dynamic bootstrap nodes failed", + error = dynamicBootstrapNodesRes.error + continue - waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() + waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() if not waku[].wakuDiscv5.isNil(): let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes @@ -375,20 +341,23 @@ proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} = proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = debug "Retrieve dynamic bootstrap nodes" + let conf = waku[].conf - let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers - ) + if conf.dnsDiscoveryConf.isSome(): + let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() + let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) - if dynamicBootstrapNodesRes.isErr(): - error "Retrieving dynamic bootstrap nodes failed", - error = dynamicBootstrapNodesRes.error - # Start Dns Discovery retry loop - waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop() - else: - waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() + if dynamicBootstrapNodesRes.isErr(): + error "Retrieving dynamic bootstrap nodes failed", + error = dynamicBootstrapNodesRes.error + # Start Dns Discovery retry loop + waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop() + else: + waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() - if not waku[].conf.discv5Only: + if conf.discv5Conf.isNone or not 
conf.discv5Conf.get().discv5Only: (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr: return err("error while calling startNode: " & $error) @@ -397,10 +366,17 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = return err("Error in updateApp: " & $error) ## Discv5 - if waku[].conf.discv5Discovery or waku[].conf.discv5Only: + if conf.discv5Conf.isSome: waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5( - waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, waku.conf, - waku.dynamicBootstrapNodes, waku.rng, waku.key, + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + conf.nodeKey, + conf.networkConf.p2pListenAddress, + conf.portsShift, ) (await waku.wakuDiscV5.start()).isOkOr: diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim new file mode 100644 index 000000000..766a17aa8 --- /dev/null +++ b/waku/factory/waku_conf.nim @@ -0,0 +1,249 @@ +import + std/[net, options, strutils], + chronicles, + libp2p/crypto/crypto, + libp2p/multiaddress, + secp256k1, + results + +import + ../waku_rln_relay/rln_relay, + ../waku_api/rest/builder, + ../discovery/waku_discv5, + ../node/waku_metrics, + ../common/logging, + ../waku_enr/capabilities, + ./network_conf + +export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf + +logScope: + topics = "waku conf" + +# TODO: should be defined in validator_signed.nim and imported here +type ProtectedShard* {.requiresInit.} = object + shard*: uint16 + key*: secp256k1.SkPublicKey + +type DnsDiscoveryConf* {.requiresInit.} = object + enrTreeUrl*: string + # TODO: should probably only have one set of name servers (see dnsaddrs) + nameServers*: seq[IpAddress] + +type StoreSyncConf* {.requiresInit.} = object + rangeSec*: uint32 + intervalSec*: uint32 + relayJitterSec*: uint32 + +type StoreServiceConf* {.requiresInit.} = object + dbMigration*: bool + dbURl*: string + dbVacuum*: bool + supportV2*: bool + maxNumDbConnections*: int + retentionPolicy*: string + resume*: bool + storeSyncConf*: Option[StoreSyncConf] + +type FilterServiceConf* {.requiresInit.} = object + maxPeersToServe*: uint32 + subscriptionTimeout*: uint16 + maxCriteria*: uint32 + +type NetworkConfig* = object # TODO: make enum + natStrategy*: string + p2pTcpPort*: Port + dns4DomainName*: Option[string] + p2pListenAddress*: IpAddress + extMultiAddrs*: seq[MultiAddress] + extMultiAddrsOnly*: bool + +## `WakuConf` is a valid configuration for a Waku node +## All information needed by a waku node should be contained +## In this object. A convenient `validate` method enables doing +## sanity checks beyond type enforcement. +## If `Option` is `some` it means the related protocol is enabled. 
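+## `Waku.new` calls `validate()` on the provided configuration before building the node.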
+type WakuConf* {.requiresInit.} = ref object + # ref because `getRunningNetConfig` modifies it + nodeKey*: crypto.PrivateKey + + clusterId*: uint16 + shards*: seq[uint16] + protectedShards*: seq[ProtectedShard] + + # TODO: move to an autoShardingConf + numShardsInNetwork*: uint32 + contentTopics*: seq[string] + + relay*: bool + lightPush*: bool + peerExchange*: bool + + # TODO: remove relay peer exchange + relayPeerExchange*: bool + rendezvous*: bool + circuitRelayClient*: bool + keepAlive*: bool + + discv5Conf*: Option[Discv5Conf] + dnsDiscoveryConf*: Option[DnsDiscoveryConf] + filterServiceConf*: Option[FilterServiceConf] + storeServiceConf*: Option[StoreServiceConf] + rlnRelayConf*: Option[RlnRelayConf] + restServerConf*: Option[RestServerConf] + metricsServerConf*: Option[MetricsServerConf] + webSocketConf*: Option[WebSocketConf] + + portsShift*: uint16 + dnsAddrs*: bool + dnsAddrsNameServers*: seq[IpAddress] + networkConf*: NetworkConfig + wakuFlags*: CapabilitiesBitfield + + # TODO: could probably make it a `PeerRemoteInfo` + staticNodes*: seq[string] + remoteStoreNode*: Option[string] + remoteLightPushNode*: Option[string] + remoteFilterNode*: Option[string] + remotePeerExchangeNode*: Option[string] + + maxMessageSizeBytes*: uint64 + + logLevel*: logging.LogLevel + logFormat*: logging.LogFormat + + peerPersistence*: bool + # TODO: should clearly be a uint + peerStoreCapacity*: Option[int] + # TODO: should clearly be a uint + maxConnections*: int + + agentString*: string + + colocationLimit*: int + + # TODO: use proper type + rateLimits*: seq[string] + + # TODO: those could be in a relay conf object + maxRelayPeers*: Option[int] + relayShardedPeerManagement*: bool + # TODO: use proper type + relayServiceRatio*: string + + p2pReliability*: bool + +proc logConf*(conf: WakuConf) = + info "Configuration: Enabled protocols", + relay = conf.relay, + rlnRelay = conf.rlnRelayConf.isSome(), + store = conf.storeServiceConf.isSome(), + filter = conf.filterServiceConf.isSome(), + lightPush = conf.lightPush, + peerExchange = conf.peerExchange + + info "Configuration. Network", cluster = conf.clusterId + + for shard in conf.shards: + info "Configuration. Shards", shard = shard + + if conf.discv5Conf.isSome(): + for i in conf.discv5Conf.get().bootstrapNodes: + info "Configuration. Bootstrap nodes", node = i.string + + if conf.rlnRelayConf.isSome(): + var rlnRelayConf = conf.rlnRelayConf.get() + if rlnRelayConf.dynamic: + info "Configuration. 
Validation", + mechanism = "onchain rln", + contract = rlnRelayConf.ethContractAddress.string, + maxMessageSize = conf.maxMessageSizeBytes, + rlnEpochSizeSec = rlnRelayConf.epochSizeSec, + rlnRelayUserMessageLimit = rlnRelayConf.userMessageLimit, + rlnRelayEthClientAddress = string(rlnRelayConf.ethClientAddress) + +proc validateNodeKey(wakuConf: WakuConf): Result[void, string] = + wakuConf.nodeKey.getPublicKey().isOkOr: + return err("Node key is invalid") + return ok() + +proc validateShards(wakuConf: WakuConf): Result[void, string] = + let numShardsInNetwork = wakuConf.numShardsInNetwork + + # TODO: fix up this behaviour + if numShardsInNetwork == 0: + return ok() + + for shard in wakuConf.shards: + if shard >= numShardsInNetwork: + let msg = + "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " & + $numShardsInNetwork # fmt doesn't work + error "validateShards failed", error = msg + return err(msg) + + return ok() + +proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] = + if wakuConf.networkConf.dns4DomainName.isSome() and + isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string): + return err("dns4DomainName is an empty string, set it to none(string) instead") + + if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio): + return err("relayServiceRatio is an empty string") + + for sn in wakuConf.staticNodes: + if isEmptyOrWhiteSpace(sn): + return err("staticNodes contain an empty string") + + if wakuConf.remoteStoreNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteStoreNode.get()): + return err("remoteStoreNode is an empty string, set it to none(string) instead") + + if wakuConf.remoteLightPushNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteLightPushNode.get()): + return err("remoteLightPushNode is an empty string, set it to none(string) instead") + + if wakuConf.remotePeerExchangeNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remotePeerExchangeNode.get()): + return + err("remotePeerExchangeNode is an empty string, set it to none(string) instead") + + if wakuConf.remoteFilterNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteFilterNode.get()): + return + err("remotePeerExchangeNode is an empty string, set it to none(string) instead") + + if wakuConf.dnsDiscoveryConf.isSome() and + isEmptyOrWhiteSpace(wakuConf.dnsDiscoveryConf.get().enrTreeUrl): + return err("dnsDiscoveryConf.enrTreeUrl is an empty string") + + # TODO: rln relay config should validate itself + if wakuConf.rlnRelayConf.isSome(): + let rlnRelayConf = wakuConf.rlnRelayConf.get() + + if isEmptyOrWhiteSpace(rlnRelayConf.treePath): + return err("rlnRelayConf.treepath is an empty string") + if isEmptyOrWhiteSpace(rlnRelayConf.ethClientAddress): + return err("rlnRelayConf.ethClientAddress is an empty string") + if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress): + return err("rlnRelayConf.ethContractAddress is an empty string") + + if rlnRelayConf.creds.isSome(): + let creds = rlnRelayConf.creds.get() + if isEmptyOrWhiteSpace(creds.path): + return err ( + "rlnRelayConf.creds.path is an empty string, set rlnRelayConf.creds it to none instead" + ) + if isEmptyOrWhiteSpace(creds.password): + return err ( + "rlnRelayConf.creds.password is an empty string, set rlnRelayConf.creds to none instead" + ) + + return ok() + +proc validate*(wakuConf: WakuConf): Result[void, string] = + ?wakuConf.validateNodeKey() + ?wakuConf.validateShards() + ?wakuConf.validateNoEmptyStrings() + return ok() diff --git a/waku/node/config.nim b/waku/node/net_config.nim similarity index 90% 
rename from waku/node/config.nim rename to waku/node/net_config.nim index 51aadb48d..a45d95f92 100644 --- a/waku/node/config.nim +++ b/waku/node/net_config.nim @@ -61,6 +61,8 @@ proc isWsAddress*(ma: MultiAddress): bool = proc containsWsAddress(extMultiAddrs: seq[MultiAddress]): bool = return extMultiAddrs.filterIt(it.isWsAddress()).len > 0 +const DefaultWsBindPort = static(Port(8000)) +# TODO: migrate to builder pattern with nested configs proc init*( T: type NetConfig, bindIp: IpAddress, @@ -69,7 +71,7 @@ proc init*( extPort = none(Port), extMultiAddrs = newSeq[MultiAddress](), extMultiAddrsOnly: bool = false, - wsBindPort: Port = Port(8000), + wsBindPort: Option[Port] = some(DefaultWsBindPort), wsEnabled: bool = false, wssEnabled: bool = false, dns4DomainName = none(string), @@ -86,7 +88,9 @@ proc init*( var wsHostAddress = none(MultiAddress) if wsEnabled or wssEnabled: try: - wsHostAddress = some(ip4TcpEndPoint(bindIp, wsbindPort) & wsFlag(wssEnabled)) + wsHostAddress = some( + ip4TcpEndPoint(bindIp, wsbindPort.get(DefaultWsBindPort)) & wsFlag(wssEnabled) + ) except CatchableError: return err(getCurrentExceptionMsg()) @@ -113,8 +117,10 @@ proc init*( if wsHostAddress.isSome(): try: - wsExtAddress = - some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled)) + wsExtAddress = some( + dns4TcpEndPoint(dns4DomainName.get(), wsBindPort.get(DefaultWsBindPort)) & + wsFlag(wssEnabled) + ) except CatchableError: return err(getCurrentExceptionMsg()) else: @@ -124,8 +130,10 @@ proc init*( if wsHostAddress.isSome(): try: - wsExtAddress = - some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled)) + wsExtAddress = some( + ip4TcpEndPoint(extIp.get(), wsBindPort.get(DefaultWsBindPort)) & + wsFlag(wssEnabled) + ) except CatchableError: return err(getCurrentExceptionMsg()) diff --git a/waku/node/waku_metrics.nim b/waku/node/waku_metrics.nim index c349f0849..ba61f6ef8 100644 --- a/waku/node/waku_metrics.nim +++ b/waku/node/waku_metrics.nim @@ -5,14 +5,18 @@ import ../waku_rln_relay/protocol_metrics as rln_metrics, ../utils/collector, ./peer_manager, - ./waku_node, - ../factory/external_config + ./waku_node const LogInterval = 10.minutes logScope: topics = "waku node metrics" +type MetricsServerConf* = object + httpAddress*: IpAddress + httpPort*: Port + logging*: bool + proc startMetricsLog*() = var logMetrics: CallbackFunc @@ -70,17 +74,15 @@ proc startMetricsServer( return ok(server) proc startMetricsServerAndLogging*( - conf: WakuNodeConf + conf: MetricsServerConf, portsShift: uint16 ): Result[MetricsHttpServerRef, string] = var metricsServer: MetricsHttpServerRef - if conf.metricsServer: - metricsServer = startMetricsServer( - conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift) - ).valueOr: - return - err("Starting metrics server failed. Continuing in current state:" & $error) + metricsServer = startMetricsServer( + conf.httpAddress, Port(conf.httpPort.uint16 + portsShift) + ).valueOr: + return err("Starting metrics server failed. 
Continuing in current state:" & $error) - if conf.metricsLogging: + if conf.logging: startMetricsLog() return ok(metricsServer) diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index bb8b6f9c3..e38b1e795 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -46,7 +46,7 @@ import ../waku_enr, ../waku_peer_exchange, ../waku_rln_relay, - ./config, + ./net_config, ./peer_manager, ../common/rate_limit/setting @@ -207,9 +207,9 @@ proc mountSharding*( proc mountStoreSync*( node: WakuNode, - storeSyncRange = 3600, - storeSyncInterval = 300, - storeSyncRelayJitter = 20, + storeSyncRange = 3600.uint32, + storeSyncInterval = 300.uint32, + storeSyncRelayJitter = 20.uint32, ): Future[Result[void, string]] {.async.} = let idsChannel = newAsyncQueue[SyncID](0) let wantsChannel = newAsyncQueue[PeerId](0) @@ -1231,8 +1231,7 @@ proc mountRlnRelay*( raise newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error) let rlnRelay = rlnRelayRes.get() - - if (rlnConf.rlnRelayUserMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit): + if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit): error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract" let validator = generateRlnValidator(rlnRelay, spamHandler) diff --git a/waku/utils/collector.nim b/waku/utils/collector.nim index de6411ae3..3586a2d6a 100644 --- a/waku/utils/collector.nim +++ b/waku/utils/collector.nim @@ -22,9 +22,19 @@ template parseAndAccumulate*(collector: Collector, cumulativeValue: float64): fl cumulativeValue = total freshCount +template parseAndAccumulate*( + collector: typedesc[IgnoredCollector], cumulativeValue: float64 +): float64 = + ## Used when metrics are disabled (undefined `metrics` compilation flag) + 0.0 + template collectorAsF64*(collector: Collector): float64 = ## This template is used to get metrics from 0 ## Serves as a wrapper for parseCollectorIntoF64 which is gcsafe {.gcsafe.}: let total = parseCollectorIntoF64(collector) total + +template collectorAsF64*(collector: typedesc[IgnoredCollector]): float64 = + ## Used when metrics are disabled (undefined `metrics` compilation flag) + 0.0 diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim index 747835fc8..6e880f5a3 100644 --- a/waku/waku_api/rest/builder.nim +++ b/waku/waku_api/rest/builder.nim @@ -5,7 +5,6 @@ import presto import waku/waku_node, waku/discovery/waku_discv5, - waku/factory/external_config, waku/waku_api/message_cache, waku/waku_api/handlers, waku/waku_api/rest/server, @@ -31,12 +30,18 @@ import var restServerNotInstalledTab {.threadvar.}: TableRef[string, string] restServerNotInstalledTab = newTable[string, string]() -proc startRestServerEsentials*( - nodeHealthMonitor: WakuNodeHealthMonitor, conf: WakuNodeConf -): Result[WakuRestServerRef, string] = - if not conf.rest: - return ok(nil) +export WakuRestServerRef +type RestServerConf* = object + allowOrigin*: seq[string] + listenAddress*: IpAddress + port*: Port + admin*: bool + relayCacheCapacity*: uint32 + +proc startRestServerEssentials*( + nodeHealthMonitor: WakuNodeHealthMonitor, conf: RestServerConf, portsShift: uint16 +): Result[WakuRestServerRef, string] = let requestErrorHandler: RestRequestErrorHandler = proc( error: RestRequestError, request: HttpRequestRef ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = @@ -72,13 +77,13 @@ proc startRestServerEsentials*( return defaultResponse() let allowedOrigin = - if len(conf.restAllowOrigin) > 0: - 
some(conf.restAllowOrigin.join(",")) + if len(conf.allowOrigin) > 0: + some(conf.allowOrigin.join(",")) else: none(string) - let address = conf.restAddress - let port = Port(conf.restPort + conf.portsShift) + let address = conf.listenAddress + let port = Port(conf.port.uint16 + portsShift) let server = ?newRestHttpServer( address, @@ -112,14 +117,16 @@ proc startRestServerProtocolSupport*( restServer: WakuRestServerRef, node: WakuNode, wakuDiscv5: WakuDiscoveryV5, - conf: WakuNodeConf, + conf: RestServerConf, + relayEnabled: bool, + lightPushEnabled: bool, + clusterId: uint16, + shards: seq[uint16], + contentTopics: seq[string], ): Result[void, string] = - if not conf.rest: - return ok() - var router = restServer.router ## Admin REST API - if conf.restAdmin: + if conf.admin: installAdminApiHandlers(router, node) else: restServerNotInstalledTab["admin"] = @@ -129,22 +136,23 @@ proc startRestServerProtocolSupport*( installDebugApiHandlers(router, node) ## Relay REST API - if conf.relay: + if relayEnabled: ## This MessageCache is used, f.e., in js-waku<>nwaku interop tests. ## js-waku tests asks nwaku-docker through REST whether a message is properly received. - let cache = MessageCache.init(int(conf.restRelayCacheCapacity)) + const RestRelayCacheCapacity = 50 + let cache = MessageCache.init(int(RestRelayCacheCapacity)) let handler: WakuRelayHandler = messageCacheHandler(cache) - for shard in conf.shards: - let pubsubTopic = $RelayShard(clusterId: conf.clusterId, shardId: shard) + for shard in shards: + let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard) cache.pubsubSubscribe(pubsubTopic) ## TODO: remove this line. use observer-observable pattern ## within waku_node::registerRelayDefaultHandler discard node.wakuRelay.subscribe(pubsubTopic, handler) - for contentTopic in conf.contentTopics: + for contentTopic in contentTopics: cache.contentSubscribe(contentTopic) let shard = node.wakuSharding.getShard(contentTopic).valueOr: @@ -192,7 +200,7 @@ proc startRestServerProtocolSupport*( ## or install it to be used with self-hosted lightpush service ## We either get lightpushnode (lightpush service node) from config or discovered or self served if (node.wakuLegacyLightpushClient != nil) or - (conf.lightpush and node.wakuLegacyLightPush != nil and node.wakuRelay != nil): + (lightPushEnabled and node.wakuLegacyLightPush != nil and node.wakuRelay != nil): let lightDiscoHandler = if not wakuDiscv5.isNil(): some(defaultDiscoveryHandler(wakuDiscv5, Lightpush)) diff --git a/waku/waku_api/rest/server.nim b/waku/waku_api/rest/server.nim index b8ad405c3..f16dfe83f 100644 --- a/waku/waku_api/rest/server.nim +++ b/waku/waku_api/rest/server.nim @@ -23,7 +23,7 @@ type ### Configuration -type RestServerConf* = object +type RestServerConf* {.requiresInit.} = object cacheSize*: Natural ## \ ## The maximum number of recently accessed states that are kept in \ diff --git a/waku/waku_node.nim b/waku/waku_node.nim index f1c647111..74415e9de 100644 --- a/waku/waku_node.nim +++ b/waku/waku_node.nim @@ -1,7 +1,7 @@ import - ./node/config, + ./node/net_config, ./node/waku_switch as switch, ./node/waku_node as node, ./node/health_monitor as health_monitor -export config, switch, node, health_monitor +export net_config, switch, node, health_monitor diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index b48d6894e..268f1c93d 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -32,18 +32,26 @@ import logScope: topics = "waku rln_relay" -type 
WakuRlnConfig* = object - rlnRelayDynamic*: bool - rlnRelayCredIndex*: Option[uint] - rlnRelayEthContractAddress*: string - rlnRelayEthClientAddress*: string - rlnRelayChainId*: uint - rlnRelayCredPath*: string - rlnRelayCredPassword*: string - rlnRelayTreePath*: string - rlnEpochSizeSec*: uint64 +type RlnRelayCreds* {.requiresInit.} = object + path*: string + password*: string + +type RlnRelayConf* = object of RootObj + # TODO: severals parameters are only needed when it's dynamic + # change the config to either nest or use enum/type variant so it's obvious + # and then it can be set to `requiresInit` + dynamic*: bool + credIndex*: Option[uint] + ethContractAddress*: string + ethClientAddress*: string + chainId*: uint + creds*: Option[RlnRelayCreds] + treePath*: string + epochSizeSec*: uint64 + userMessageLimit*: uint64 + +type WakuRlnConfig* = object of RlnRelayConf onFatalErrorAction*: OnFatalErrorHandler - rlnRelayUserMessageLimit*: uint64 proc createMembershipList*( rln: ptr RLN, n: int @@ -421,10 +429,10 @@ proc mount( groupManager: GroupManager wakuRlnRelay: WakuRLNRelay # create an RLN instance - let rlnInstance = createRLNInstance(tree_path = conf.rlnRelayTreePath).valueOr: + let rlnInstance = createRLNInstance(tree_path = conf.treePath).valueOr: return err("could not create RLN instance: " & $error) - if not conf.rlnRelayDynamic: + if not conf.dynamic: # static setup let parsedGroupKeys = StaticGroupKeys.toIdentityCredentials().valueOr: return err("could not parse static group keys: " & $error) @@ -432,31 +440,27 @@ proc mount( groupManager = StaticGroupManager( groupSize: StaticGroupSize, groupKeys: parsedGroupKeys, - membershipIndex: conf.rlnRelayCredIndex, + membershipIndex: conf.credIndex, rlnInstance: rlnInstance, onFatalErrorAction: conf.onFatalErrorAction, ) # we don't persist credentials in static mode since they exist in ./constants.nim else: - # dynamic setup - proc useValueOrNone(s: string): Option[string] = - if s == "": - none(string) + let (rlnRelayCredPath, rlnRelayCredPassword) = + if conf.creds.isSome: + (some(conf.creds.get().path), some(conf.creds.get().password)) else: - some(s) + (none(string), none(string)) - let - rlnRelayCredPath = useValueOrNone(conf.rlnRelayCredPath) - rlnRelayCredPassword = useValueOrNone(conf.rlnRelayCredPassword) groupManager = OnchainGroupManager( - ethClientUrl: string(conf.rlnRelayethClientAddress), - ethContractAddress: $conf.rlnRelayEthContractAddress, - chainId: conf.rlnRelayChainId, + ethClientUrl: string(conf.ethClientAddress), + ethContractAddress: $conf.ethContractAddress, + chainId: conf.chainId, rlnInstance: rlnInstance, registrationHandler: registrationHandler, keystorePath: rlnRelayCredPath, keystorePassword: rlnRelayCredPassword, - membershipIndex: conf.rlnRelayCredIndex, + membershipIndex: conf.credIndex, onFatalErrorAction: conf.onFatalErrorAction, ) @@ -469,10 +473,9 @@ proc mount( wakuRlnRelay = WakuRLNRelay( groupManager: groupManager, - nonceManager: - NonceManager.init(conf.rlnRelayUserMessageLimit, conf.rlnEpochSizeSec.float), - rlnEpochSizeSec: conf.rlnEpochSizeSec, - rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1), + nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float), + rlnEpochSizeSec: conf.epochSizeSec, + rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1), onFatalErrorAction: conf.onFatalErrorAction, ) From d86babac3ae7f4605205dc0fe6faa96628156a0a Mon Sep 17 00:00:00 2001 From: Darshan K 
<35736874+darshankabariya@users.noreply.github.com> Date: Fri, 9 May 2025 05:37:58 +0530 Subject: [PATCH 39/48] feat: deprecate sync / local merkle tree (#3312) --- Makefile | 2 +- .../test_rln_group_manager_onchain.nim | 264 +++---- .../test_wakunode_rln_relay.nim | 1 - vendor/zerokit | 2 +- waku/waku_rln_relay/conversion_utils.nim | 50 +- .../group_manager/group_manager_base.nim | 6 +- .../group_manager/on_chain/group_manager.nim | 648 ++++++++---------- waku/waku_rln_relay/protocol_metrics.nim | 5 + waku/waku_rln_relay/protocol_types.nim | 14 + waku/waku_rln_relay/rln/rln_interface.nim | 15 + waku/waku_rln_relay/rln_relay.nim | 10 +- 11 files changed, 458 insertions(+), 559 deletions(-) diff --git a/Makefile b/Makefile index 5da2d6076..d15668673 100644 --- a/Makefile +++ b/Makefile @@ -165,7 +165,7 @@ nimbus-build-system-nimble-dir: .PHONY: librln LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit -LIBRLN_VERSION := v0.5.1 +LIBRLN_VERSION := v0.7.0 ifeq ($(detected_OS),Windows) LIBRLN_FILE := rln.lib diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index b6fc44e27..25a3166ce 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -3,7 +3,7 @@ {.push raises: [].} import - std/[options, sequtils, deques], + std/[options, sequtils, deques, random], results, stew/byteutils, testutils/unittests, @@ -13,7 +13,8 @@ import web3, libp2p/crypto/crypto, eth/keys, - tests/testlib/testasync + tests/testlib/testasync, + tests/testlib/testutils import waku/[ @@ -47,7 +48,6 @@ suite "Onchain group manager": manager.ethRpc.isSome() manager.wakuRlnContract.isSome() manager.initialized - manager.rlnContractDeployedBlockNumber > 0.Quantity manager.rlnRelayMaxMessageLimit == 100 asyncTest "should error on initialization when chainId does not match": @@ -97,18 +97,13 @@ suite "Onchain group manager": echo "---" asyncTest "should error if contract does not exist": - var triggeredError = false - manager.ethContractAddress = "0x0000000000000000000000000000000000000000" - manager.onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = - echo "---" - discard - "Failed to get the deployed block number. 
Have you set the correct contract address?: No response from the Web3 provider" - echo msg - echo "---" - triggeredError = true - discard await manager.init() + var triggeredError = false + try: + discard await manager.init() + except CatchableError: + triggeredError = true check triggeredError @@ -119,103 +114,71 @@ suite "Onchain group manager": (await manager.init()).isErrOr: raiseAssert "Expected error when keystore file doesn't exist" - asyncTest "startGroupSync: should start group sync": + asyncTest "trackRootChanges: start tracking roots": (await manager.init()).isOkOr: raiseAssert $error - (await manager.startGroupSync()).isOkOr: - raiseAssert $error + discard manager.trackRootChanges() - asyncTest "startGroupSync: should guard against uninitialized state": - (await manager.startGroupSync()).isErrOr: - raiseAssert "Expected error when not initialized" + asyncTest "trackRootChanges: should guard against uninitialized state": + try: + discard manager.trackRootChanges() + except CatchableError: + check getCurrentExceptionMsg().len == 38 - asyncTest "startGroupSync: should sync to the state of the group": + asyncTest "trackRootChanges: should sync to the state of the group": let credentials = generateCredentials(manager.rlnInstance) - let rateCommitment = getRateCommitment(credentials, UserMessageLimit(1)).valueOr: - raiseAssert $error (await manager.init()).isOkOr: raiseAssert $error - let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr: - raiseAssert $error - - let fut = newFuture[void]("startGroupSync") - - proc generateCallback(fut: Future[void]): OnRegisterCallback = - proc callback(registrations: seq[Membership]): Future[void] {.async.} = - check: - registrations.len == 1 - registrations[0].index == 0 - registrations[0].rateCommitment == rateCommitment - fut.complete() - - return callback + let merkleRootBefore = manager.fetchMerkleRoot() try: - manager.onRegister(generateCallback(fut)) await manager.register(credentials, UserMessageLimit(1)) - (await manager.startGroupSync()).isOkOr: - raiseAssert $error except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await fut + discard await withTimeout(trackRootChanges(manager), 15.seconds) - let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr: + let merkleRootAfter = manager.fetchMerkleRoot() + + let metadataSetRes = manager.setMetadata() + assert metadataSetRes.isOk(), metadataSetRes.error + + let metadataOpt = getMetadata(manager.rlnInstance).valueOr: raiseAssert $error - let metadataOpt = manager.rlnInstance.getMetadata().valueOr: - raiseAssert $error + assert metadataOpt.isSome(), "metadata is not set" + let metadata = metadataOpt.get() + check: - metadataOpt.get().validRoots == manager.validRoots.toSeq() + metadata.validRoots == manager.validRoots.toSeq() merkleRootBefore != merkleRootAfter - asyncTest "startGroupSync: should fetch history correctly": + asyncTest "trackRootChanges: should fetch history correctly": + # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation + # relies on a busy loop rather than event-based monitoring. As a result, some root changes + # may be missed, leading to inconsistent test results (i.e., it may randomly return true or false). + # To ensure reliability, we use the `updateRoots()` function to validate the `validRoots` window + # after each registration. 
const credentialCount = 6 let credentials = generateCredentials(manager.rlnInstance, credentialCount) (await manager.init()).isOkOr: raiseAssert $error - let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr: - raiseAssert $error - - type TestGroupSyncFuts = array[0 .. credentialCount - 1, Future[void]] - var futures: TestGroupSyncFuts - for i in 0 ..< futures.len(): - futures[i] = newFuture[void]() - proc generateCallback( - futs: TestGroupSyncFuts, credentials: seq[IdentityCredential] - ): OnRegisterCallback = - var futureIndex = 0 - proc callback(registrations: seq[Membership]): Future[void] {.async.} = - let rateCommitment = - getRateCommitment(credentials[futureIndex], UserMessageLimit(1)) - if registrations.len == 1 and - registrations[0].rateCommitment == rateCommitment.get() and - registrations[0].index == MembershipIndex(futureIndex): - futs[futureIndex].complete() - futureIndex += 1 - - return callback + let merkleRootBefore = manager.fetchMerkleRoot() try: - manager.onRegister(generateCallback(futures, credentials)) - (await manager.startGroupSync()).isOkOr: - raiseAssert $error - for i in 0 ..< credentials.len(): await manager.register(credentials[i], UserMessageLimit(1)) + discard await manager.updateRoots() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await allFutures(futures) - - let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr: - raiseAssert $error + let merkleRootAfter = manager.fetchMerkleRoot() check: merkleRootBefore != merkleRootAfter - manager.validRootBuffer.len() == credentialCount - AcceptableRootWindowSize + manager.validRoots.len() == credentialCount asyncTest "register: should guard against uninitialized state": let dummyCommitment = default(IDCommitment) @@ -232,14 +195,12 @@ suite "Onchain group manager": assert false, "exception raised: " & getCurrentExceptionMsg() asyncTest "register: should register successfully": + # TODO :- similar to ```trackRootChanges: should fetch history correctly``` (await manager.init()).isOkOr: raiseAssert $error - (await manager.startGroupSync()).isOkOr: - raiseAssert $error let idCommitment = generateCredentials(manager.rlnInstance).idCommitment - let merkleRootBefore = manager.rlnInstance.getMerkleRoot().valueOr: - raiseAssert $error + let merkleRootBefore = manager.fetchMerkleRoot() try: await manager.register( @@ -251,10 +212,10 @@ suite "Onchain group manager": assert false, "exception raised when calling register: " & getCurrentExceptionMsg() - let merkleRootAfter = manager.rlnInstance.getMerkleRoot().valueOr: - raiseAssert $error + let merkleRootAfter = manager.fetchMerkleRoot() + check: - merkleRootAfter.inHex() != merkleRootBefore.inHex() + merkleRootAfter != merkleRootBefore manager.latestIndex == 1 asyncTest "register: callback is called": @@ -264,19 +225,19 @@ suite "Onchain group manager": let fut = newFuture[void]() proc callback(registrations: seq[Membership]): Future[void] {.async.} = - let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)) + let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)).get() check: registrations.len == 1 - registrations[0].rateCommitment == rateCommitment.get() + registrations[0].rateCommitment == rateCommitment registrations[0].index == 0 fut.complete() - manager.onRegister(callback) (await manager.init()).isOkOr: raiseAssert $error + + manager.onRegister(callback) + try: - (await manager.startGroupSync()).isOkOr: - raiseAssert $error await manager.register( 
RateCommitment( idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1) @@ -298,38 +259,43 @@ suite "Onchain group manager": assert false, "exception raised: " & getCurrentExceptionMsg() asyncTest "validateRoot: should validate good root": - let credentials = generateCredentials(manager.rlnInstance) - (await manager.init()).isOkOr: - raiseAssert $error + let idCredentials = generateCredentials(manager.rlnInstance) + let idCommitment = idCredentials.idCommitment let fut = newFuture[void]() proc callback(registrations: seq[Membership]): Future[void] {.async.} = if registrations.len == 1 and registrations[0].rateCommitment == - getRateCommitment(credentials, UserMessageLimit(1)).get() and + getRateCommitment(idCredentials, UserMessageLimit(1)).get() and registrations[0].index == 0: - manager.idCredentials = some(credentials) + manager.idCredentials = some(idCredentials) fut.complete() manager.onRegister(callback) + (await manager.init()).isOkOr: + raiseAssert $error + try: - (await manager.startGroupSync()).isOkOr: - raiseAssert $error - await manager.register(credentials, UserMessageLimit(1)) + await manager.register(idCredentials, UserMessageLimit(1)) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() await fut + let rootUpdated = await manager.updateRoots() + + if rootUpdated: + let proofResult = await manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() let messageBytes = "Hello".toBytes() - # prepare the epoch let epoch = default(Epoch) debug "epoch in bytes", epochHex = epoch.inHex() - # generate proof let validProofRes = manager.generateProof( data = messageBytes, epoch = epoch, messageId = MessageId(1) ) @@ -338,38 +304,39 @@ suite "Onchain group manager": validProofRes.isOk() let validProof = validProofRes.get() - # validate the root (should be true) let validated = manager.validateRoot(validProof.merkleRoot) check: validated asyncTest "validateRoot: should reject bad root": + let idCredentials = generateCredentials(manager.rlnInstance) + let idCommitment = idCredentials.idCommitment + (await manager.init()).isOkOr: raiseAssert $error - (await manager.startGroupSync()).isOkOr: - raiseAssert $error - let credentials = generateCredentials(manager.rlnInstance) - - ## Assume the registration occured out of band - manager.idCredentials = some(credentials) - manager.membershipIndex = some(MembershipIndex(0)) manager.userMessageLimit = some(UserMessageLimit(1)) + manager.membershipIndex = some(MembershipIndex(0)) + manager.idCredentials = some(idCredentials) + + manager.merkleProofCache = newSeq[byte](640) + for i in 0 ..< 640: + manager.merkleProofCache[i] = byte(rand(255)) let messageBytes = "Hello".toBytes() - # prepare the epoch let epoch = default(Epoch) debug "epoch in bytes", epochHex = epoch.inHex() - # generate proof - let validProof = manager.generateProof( - data = messageBytes, epoch = epoch, messageId = MessageId(0) - ).valueOr: - raiseAssert $error + let validProofRes = manager.generateProof( + data = messageBytes, epoch = epoch, messageId = MessageId(1) + ) + + check: + validProofRes.isOk() + let validProof = validProofRes.get() - # validate the root (should be false) let validated = manager.validateRoot(validProof.merkleRoot) check: @@ -393,13 +360,19 @@ suite "Onchain group manager": manager.onRegister(callback) try: - (await manager.startGroupSync()).isOkOr: - raiseAssert $error await 
manager.register(credentials, UserMessageLimit(1)) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() await fut + let rootUpdated = await manager.updateRoots() + + if rootUpdated: + let proofResult = await manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() + let messageBytes = "Hello".toBytes() # prepare the epoch @@ -412,7 +385,6 @@ suite "Onchain group manager": ).valueOr: raiseAssert $error - # verify the proof (should be true) let verified = manager.verifyProof(messageBytes, validProof).valueOr: raiseAssert $error @@ -422,31 +394,23 @@ suite "Onchain group manager": asyncTest "verifyProof: should reject invalid proof": (await manager.init()).isOkOr: raiseAssert $error - (await manager.startGroupSync()).isOkOr: - raiseAssert $error let idCredential = generateCredentials(manager.rlnInstance) try: - await manager.register( - RateCommitment( - idCommitment: idCredential.idCommitment, userMessageLimit: UserMessageLimit(1) - ) - ) + await manager.register(idCredential, UserMessageLimit(1)) except Exception, CatchableError: assert false, "exception raised when calling startGroupSync: " & getCurrentExceptionMsg() - let idCredential2 = generateCredentials(manager.rlnInstance) - - ## Assume the registration occured out of band - manager.idCredentials = some(idCredential2) - manager.membershipIndex = some(MembershipIndex(0)) - manager.userMessageLimit = some(UserMessageLimit(1)) - let messageBytes = "Hello".toBytes() - # prepare the epoch + let rootUpdated = await manager.updateRoots() + + manager.merkleProofCache = newSeq[byte](640) + for i in 0 ..< 640: + manager.merkleProofCache[i] = byte(rand(255)) + let epoch = default(Epoch) debug "epoch in bytes", epochHex = epoch.inHex() @@ -466,8 +430,8 @@ suite "Onchain group manager": check: verified == false - asyncTest "backfillRootQueue: should backfill roots in event of chain reorg": - const credentialCount = 6 + asyncTest "root queue should be updated correctly": + const credentialCount = 12 let credentials = generateCredentials(manager.rlnInstance, credentialCount) (await manager.init()).isOkOr: raiseAssert $error @@ -493,33 +457,17 @@ suite "Onchain group manager": try: manager.onRegister(generateCallback(futures, credentials)) - (await manager.startGroupSync()).isOkOr: - raiseAssert $error for i in 0 ..< credentials.len(): await manager.register(credentials[i], UserMessageLimit(1)) + discard await manager.updateRoots() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() await allFutures(futures) - # At this point, we should have a full root queue, 5 roots, and partial buffer of 1 root check: - manager.validRoots.len() == credentialCount - 1 - manager.validRootBuffer.len() == 1 - - # We can now simulate a chain reorg by calling backfillRootQueue - let expectedLastRoot = manager.validRootBuffer[0] - try: - await manager.backfillRootQueue(1) - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() - - # We should now have 5 roots in the queue, and no partial buffer - check: - manager.validRoots.len() == credentialCount - 1 - manager.validRootBuffer.len() == 0 - manager.validRoots[credentialCount - 2] == expectedLastRoot + manager.validRoots.len() == credentialCount asyncTest "isReady should return false if ethRpc is none": (await manager.init()).isOkOr: @@ -536,25 +484,9 @@ suite "Onchain group 
manager": check: isReady == false - asyncTest "isReady should return false if lastSeenBlockHead > lastProcessed": - (await manager.init()).isOkOr: - raiseAssert $error - - var isReady = true - try: - isReady = await manager.isReady() - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() - - check: - isReady == false - asyncTest "isReady should return true if ethRpc is ready": (await manager.init()).isOkOr: raiseAssert $error - # node can only be ready after group sync is done - (await manager.startGroupSync()).isOkOr: - raiseAssert $error var isReady = false try: diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index a5237dab1..3ff6923e0 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -529,7 +529,6 @@ procSuite "WakuNode - RLN relay": xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": ## This is skipped because is flaky and made CI randomly fail but is useful to run manually - # Given two nodes let contentTopic = ContentTopic("/waku/2/default-content/proto") diff --git a/vendor/zerokit b/vendor/zerokit index b9d27039c..ba467d370 160000 --- a/vendor/zerokit +++ b/vendor/zerokit @@ -1 +1 @@ -Subproject commit b9d27039c3266af108882d7a8bafc37400d29855 +Subproject commit ba467d370c56b7432522227de22fbd664d44ef3e diff --git a/waku/waku_rln_relay/conversion_utils.nim b/waku/waku_rln_relay/conversion_utils.nim index e710fea62..4a168ebeb 100644 --- a/waku/waku_rln_relay/conversion_utils.nim +++ b/waku/waku_rln_relay/conversion_utils.nim @@ -27,9 +27,6 @@ proc inHex*( valueHex = "0" & valueHex return toLowerAscii(valueHex) -proc toUserMessageLimit*(v: UInt256): UserMessageLimit = - return cast[UserMessageLimit](v) - proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] = ## returns length prefixed version of the input ## with the following format [len<8>|input] @@ -78,6 +75,31 @@ proc serialize*( ) return output +proc serialize*(witness: RLNWitnessInput): seq[byte] = + ## Serializes the RLN witness into a byte array following zerokit's expected format. 
+ ## The serialized format includes: + ## - identity_secret (32 bytes, little-endian with zero padding) + ## - user_message_limit (32 bytes, little-endian with zero padding) + ## - message_id (32 bytes, little-endian with zero padding) + ## - merkle tree depth (8 bytes, little-endian) = path_elements.len / 32 + ## - path_elements (each 32 bytes, ordered bottom-to-top) + ## - merkle tree depth again (8 bytes, little-endian) + ## - identity_path_index (sequence of bits as bytes, 0 = left, 1 = right) + ## - x (32 bytes, little-endian with zero padding) + ## - external_nullifier (32 bytes, little-endian with zero padding) + var buffer: seq[byte] + buffer.add(@(witness.identity_secret)) + buffer.add(@(witness.user_message_limit)) + buffer.add(@(witness.message_id)) + buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian)) + for element in witness.path_elements: + buffer.add(element) + buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian)) + buffer.add(witness.identity_path_index) + buffer.add(@(witness.x)) + buffer.add(@(witness.external_nullifier)) + return buffer + proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] = ## a private proc to convert RateLimitProof and data to a byte seq ## this conversion is used in the proof verification proc @@ -133,3 +155,25 @@ func `+`*(a, b: Quantity): Quantity {.borrow.} func u256*(n: Quantity): UInt256 {.inline.} = n.uint64.stuint(256) + +proc uint64ToField*(n: uint64): array[32, byte] = + var output: array[32, byte] + let bytes = toBytes(n, Endianness.littleEndian) + output[0 ..< bytes.len] = bytes + return output + +proc UInt256ToField*(v: UInt256): array[32, byte] = + return cast[array[32, byte]](v) # already doesn't use `result` + +proc seqToField*(s: seq[byte]): array[32, byte] = + var output: array[32, byte] + let len = min(s.len, 32) + for i in 0 ..< len: + output[i] = s[i] + return output + +proc uint64ToIndex*(index: MembershipIndex, depth: int): seq[byte] = + var output = newSeq[byte](depth) + for i in 0 ..< depth: + output[i] = byte((index shr i) and 1) # LSB-first bit decomposition + return output diff --git a/waku/waku_rln_relay/group_manager/group_manager_base.nim b/waku/waku_rln_relay/group_manager/group_manager_base.nim index 818b36140..4a1c84e55 100644 --- a/waku/waku_rln_relay/group_manager/group_manager_base.nim +++ b/waku/waku_rln_relay/group_manager/group_manager_base.nim @@ -145,7 +145,6 @@ method validateRoot*( g: GroupManager, root: MerkleNode ): bool {.base, gcsafe, raises: [].} = ## validates the root against the valid roots queue - # Check if the root is in the valid roots queue if g.indexOfRoot(root) >= 0: return true return false @@ -175,7 +174,7 @@ method verifyProof*( method generateProof*( g: GroupManager, - data: openArray[byte], + data: seq[byte], epoch: Epoch, messageId: MessageId, rlnIdentifier = DefaultRlnIdentifier, @@ -189,6 +188,7 @@ method generateProof*( return err("membership index is not set") if g.userMessageLimit.isNone(): return err("user message limit is not set") + waku_rln_proof_generation_duration_seconds.nanosecondTime: let proof = proofGen( rlnInstance = g.rlnInstance, @@ -201,8 +201,6 @@ method generateProof*( ).valueOr: return err("proof generation failed: " & $error) - waku_rln_remaining_proofs_per_epoch.dec() - waku_rln_total_generated_proofs.inc() return ok(proof) method isReady*(g: GroupManager): Future[bool] {.base, async.} = diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim 
b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index e61ffb956..fe3db9102 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -10,19 +10,18 @@ import nimcrypto/keccak as keccak, stint, json, - std/tables, + std/[strutils, tables, algorithm], stew/[byteutils, arrayops], - sequtils, - strutils + sequtils + import ../../../waku_keystore, ../../rln, + ../../rln/rln_interface, ../../conversion_utils, ../group_manager_base, ./retry_wrapper -from strutils import parseHexInt - export group_manager_base logScope: @@ -31,19 +30,23 @@ logScope: # using the when predicate does not work within the contract macro, hence need to dupe contract(WakuRlnContract): # this serves as an entrypoint into the rln membership set - proc register(idCommitment: UInt256, userMessageLimit: EthereumUInt32) + proc register(idCommitment: UInt256, userMessageLimit: UInt32) # Initializes the implementation contract (only used in unit tests) proc initialize(maxMessageLimit: UInt256) # this event is raised when a new member is registered - proc MemberRegistered(rateCommitment: UInt256, index: EthereumUInt32) {.event.} + proc MemberRegistered(rateCommitment: UInt256, index: UInt32) {.event.} # this function denotes existence of a given user - proc memberExists(idCommitment: Uint256): UInt256 {.view.} + proc memberExists(idCommitment: UInt256): UInt256 {.view.} # this constant describes the next index of a new member proc commitmentIndex(): UInt256 {.view.} # this constant describes the block number this contract was deployed on proc deployedBlockNumber(): UInt256 {.view.} # this constant describes max message limit of rln contract proc MAX_MESSAGE_LIMIT(): UInt256 {.view.} + # this function returns the merkleProof for a given index + # proc merkleProofElements(index: UInt40): seq[byte] {.view.} + # this function returns the merkle root + proc root(): UInt256 {.view.} type WakuRlnContractWithSender = Sender[WakuRlnContract] @@ -52,42 +55,14 @@ type ethPrivateKey*: Option[string] ethContractAddress*: string ethRpc*: Option[Web3] - rlnContractDeployedBlockNumber*: BlockNumber wakuRlnContract*: Option[WakuRlnContractWithSender] - latestProcessedBlock*: BlockNumber registrationTxHash*: Option[TxHash] chainId*: uint keystorePath*: Option[string] keystorePassword*: Option[string] registrationHandler*: Option[RegistrationHandler] - # this buffer exists to backfill appropriate roots for the merkle tree, - # in event of a reorg. we store 5 in the buffer. Maybe need to revisit this, - # because the average reorg depth is 1 to 2 blocks. 
- validRootBuffer*: Deque[MerkleNode] - # interval loop to shut down gracefully - blockFetchingActive*: bool - -const DefaultKeyStorePath* = "rlnKeystore.json" -const DefaultKeyStorePassword* = "password" - -const DefaultBlockPollRate* = 6.seconds - -template initializedGuard(g: OnchainGroupManager): untyped = - if not g.initialized: - raise newException(CatchableError, "OnchainGroupManager is not initialized") - -proc resultifiedInitGuard(g: OnchainGroupManager): GroupManagerResult[void] = - try: - initializedGuard(g) - return ok() - except CatchableError: - return err("OnchainGroupManager is not initialized") - -template retryWrapper( - g: OnchainGroupManager, res: auto, errStr: string, body: untyped -): auto = - retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction): - body + latestProcessedBlock*: BlockNumber + merkleProofCache*: seq[byte] proc setMetadata*( g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber) @@ -112,33 +87,109 @@ proc setMetadata*( return err("failed to persist rln metadata: " & getCurrentExceptionMsg()) return ok() -method atomicBatch*( - g: OnchainGroupManager, - start: MembershipIndex, - rateCommitments = newSeq[RawRateCommitment](), - toRemoveIndices = newSeq[MembershipIndex](), -): Future[void] {.async: (raises: [Exception]), base.} = - initializedGuard(g) +proc fetchMerkleProofElements*( + g: OnchainGroupManager +): Future[Result[seq[byte], string]] {.async.} = + try: + let membershipIndex = g.membershipIndex.get() + let index40 = stuint(membershipIndex, 40) - waku_rln_membership_insertion_duration_seconds.nanosecondTime: - let operationSuccess = - g.rlnInstance.atomicWrite(some(start), rateCommitments, toRemoveIndices) - if not operationSuccess: - raise newException(CatchableError, "atomic batch operation failed") - # TODO: when slashing is enabled, we need to track slashed members - waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet())) + let methodSig = "merkleProofElements(uint40)" + let methodIdDigest = keccak.keccak256.digest(methodSig) + let methodId = methodIdDigest.data[0 .. 
3] - if g.registerCb.isSome(): - var membersSeq = newSeq[Membership]() - for i in 0 ..< rateCommitments.len: - var index = start + MembershipIndex(i) - debug "registering member to callback", - rateCommitment = rateCommitments[i], index = index - let member = Membership(rateCommitment: rateCommitments[i], index: index) - membersSeq.add(member) - await g.registerCb.get()(membersSeq) + var paddedParam = newSeq[byte](32) + let indexBytes = index40.toBytesBE() + for i in 0 ..< min(indexBytes.len, paddedParam.len): + paddedParam[paddedParam.len - indexBytes.len + i] = indexBytes[i] - g.validRootBuffer = g.slideRootQueue() + var callData = newSeq[byte]() + for b in methodId: + callData.add(b) + callData.add(paddedParam) + + var tx: TransactionArgs + tx.to = Opt.some(fromHex(Address, g.ethContractAddress)) + tx.data = Opt.some(callData) + + let responseBytes = await g.ethRpc.get().provider.eth_call(tx, "latest") + + return ok(responseBytes) + except CatchableError: + error "Failed to fetch Merkle proof elements", error = getCurrentExceptionMsg() + return err("Failed to fetch merkle proof elements: " & getCurrentExceptionMsg()) + +proc fetchMerkleRoot*( + g: OnchainGroupManager +): Future[Result[UInt256, string]] {.async.} = + try: + let merkleRootInvocation = g.wakuRlnContract.get().root() + let merkleRoot = await merkleRootInvocation.call() + return ok(merkleRoot) + except CatchableError: + error "Failed to fetch Merkle root", error = getCurrentExceptionMsg() + return err("Failed to fetch merkle root: " & getCurrentExceptionMsg()) + +template initializedGuard(g: OnchainGroupManager): untyped = + if not g.initialized: + raise newException(CatchableError, "OnchainGroupManager is not initialized") + +template retryWrapper( + g: OnchainGroupManager, res: auto, errStr: string, body: untyped +): auto = + retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction): + body + +method validateRoot*(g: OnchainGroupManager, root: MerkleNode): bool = + if g.validRoots.find(root) >= 0: + return true + return false + +proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} = + let rootRes = await g.fetchMerkleRoot() + if rootRes.isErr(): + return false + + let merkleRoot = UInt256ToField(rootRes.get()) + if g.validRoots.len == 0: + g.validRoots.addLast(merkleRoot) + return true + + if g.validRoots[g.validRoots.len - 1] != merkleRoot: + if g.validRoots.len > AcceptableRootWindowSize: + discard g.validRoots.popFirst() + g.validRoots.addLast(merkleRoot) + return true + + return false + +proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} = + try: + initializedGuard(g) + let ethRpc = g.ethRpc.get() + let wakuRlnContract = g.wakuRlnContract.get() + + const rpcDelay = 5.seconds + + while true: + let rootUpdated = await g.updateRoots() + + if rootUpdated: + if g.membershipIndex.isNone(): + error "membershipIndex is not set; skipping proof update" + else: + let proofResult = await g.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + g.merkleProofCache = proofResult.get() + + # also need update registerd membership + let memberCount = cast[int64](await wakuRlnContract.commitmentIndex().call()) + waku_rln_number_registered_memberships.set(float64(memberCount)) + + await sleepAsync(rpcDelay) + except CatchableError: + error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg() method register*( g: OnchainGroupManager, rateCommitment: RateCommitment @@ -147,18 +198,14 @@ method register*( 
try: let leaf = rateCommitment.toLeaf().get() - await g.registerBatch(@[leaf]) + if g.registerCb.isSome(): + let idx = g.latestIndex + debug "registering member via callback", rateCommitment = leaf, index = idx + await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)]) + g.latestIndex.inc() except CatchableError: raise newException(ValueError, getCurrentExceptionMsg()) -method registerBatch*( - g: OnchainGroupManager, rateCommitments: seq[RawRateCommitment] -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) - - await g.atomicBatch(g.latestIndex, rateCommitments) - g.latestIndex += MembershipIndex(rateCommitments.len) - method register*( g: OnchainGroupManager, identityCredential: IdentityCredential, @@ -212,8 +259,19 @@ method register*( debug "parsed membershipIndex", membershipIndex g.userMessageLimit = some(userMessageLimit) g.membershipIndex = some(membershipIndex.toMembershipIndex()) + g.idCredentials = some(identityCredential) + + let rateCommitment = RateCommitment( + idCommitment: identityCredential.idCommitment, userMessageLimit: userMessageLimit + ) + .toLeaf() + .get() + + if g.registerCb.isSome(): + let member = Membership(rateCommitment: rateCommitment, index: g.latestIndex) + await g.registerCb.get()(@[member]) + g.latestIndex.inc() - # don't handle member insertion into the tree here, it will be handled by the event listener return method withdraw*( @@ -226,304 +284,173 @@ method withdrawBatch*( ): Future[void] {.async: (raises: [Exception]).} = initializedGuard(g) - # TODO: after slashing is enabled on the contract, use atomicBatch internally +proc getRootFromProofAndIndex( + g: OnchainGroupManager, elements: seq[byte], bits: seq[byte] +): GroupManagerResult[array[32, byte]] = + # this is a helper function to get root from merkle proof elements and index + # it's currently not used anywhere, but can be used to verify the root from the proof and index + # Compute leaf hash from idCommitment and messageLimit + let messageLimitField = uint64ToField(g.userMessageLimit.get()) + let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField]) + if leafHashRes.isErr(): + return err("Failed to compute leaf hash: " & leafHashRes.error) -proc parseEvent( - event: type MemberRegistered, log: JsonNode -): GroupManagerResult[Membership] = - ## parses the `data` parameter of the `MemberRegistered` event `log` - ## returns an error if it cannot parse the `data` parameter - var rateCommitment: UInt256 - var index: UInt256 - var data: seq[byte] - try: - data = hexToSeqByte(log["data"].getStr()) - except ValueError: - return err( - "failed to parse the data field of the MemberRegistered event: " & - getCurrentExceptionMsg() - ) - var offset = 0 - try: - # Parse the rateCommitment - offset += decode(data, 0, offset, rateCommitment) - # Parse the index - offset += decode(data, 0, offset, index) - return ok( - Membership( - rateCommitment: rateCommitment.toRateCommitment(), - index: index.toMembershipIndex(), - ) - ) - except CatchableError: - return err("failed to parse the data field of the MemberRegistered event") + var hash = leafHashRes.get() + for i in 0 ..< bits.len: + let sibling = elements[i * 32 .. 
(i + 1) * 32 - 1] -type BlockTable* = OrderedTable[BlockNumber, seq[(Membership, bool)]] + let hashRes = + if bits[i] == 0: + poseidon(@[@hash, sibling]) + else: + poseidon(@[sibling, @hash]) -proc backfillRootQueue*( - g: OnchainGroupManager, len: uint -): Future[void] {.async: (raises: [Exception]).} = - if len > 0: - # backfill the tree's acceptable roots - for i in 0 .. len - 1: - # remove the last root - g.validRoots.popLast() - for i in 0 .. len - 1: - # add the backfilled root - g.validRoots.addLast(g.validRootBuffer.popLast()) + hash = hashRes.valueOr: + return err("Failed to compute poseidon hash: " & error) + hash = hashRes.get() -proc insert( - blockTable: var BlockTable, - blockNumber: BlockNumber, - member: Membership, - removed: bool, -) = - let memberTuple = (member, removed) - if blockTable.hasKeyOrPut(blockNumber, @[memberTuple]): - try: - blockTable[blockNumber].add(memberTuple) - except KeyError: # qed - error "could not insert member into block table", - blockNumber = blockNumber, member = member + return ok(hash) -proc getRawEvents( - g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber -): Future[JsonNode] {.async: (raises: [Exception]).} = - initializedGuard(g) +method generateProof*( + g: OnchainGroupManager, + data: seq[byte], + epoch: Epoch, + messageId: MessageId, + rlnIdentifier = DefaultRlnIdentifier, +): GroupManagerResult[RateLimitProof] {.gcsafe, raises: [].} = + ## Generates an RLN proof using the cached Merkle proof and custom witness + # Ensure identity credentials and membership index are set + if g.idCredentials.isNone(): + return err("identity credentials are not set") + if g.membershipIndex.isNone(): + return err("membership index is not set") + if g.userMessageLimit.isNone(): + return err("user message limit is not set") - let ethRpc = g.ethRpc.get() - let wakuRlnContract = g.wakuRlnContract.get() + if (g.merkleProofCache.len mod 32) != 0: + return err("Invalid merkle proof cache length") - var eventStrs: seq[JsonString] - g.retryWrapper(eventStrs, "Failed to get the events"): - await wakuRlnContract.getJsonLogs( - MemberRegistered, - fromBlock = Opt.some(fromBlock.blockId()), - toBlock = Opt.some(toBlock.blockId()), - ) + let identity_secret = seqToField(g.idCredentials.get().idSecretHash) + let user_message_limit = uint64ToField(g.userMessageLimit.get()) + let message_id = uint64ToField(messageId) + var path_elements = newSeq[byte](0) - var events = newJArray() - for eventStr in eventStrs: - events.add(parseJson(eventStr.string)) - return events + if (g.merkleProofCache.len mod 32) != 0: + return err("Invalid merkle proof cache length") -proc getBlockTable( - g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber -): Future[BlockTable] {.async: (raises: [Exception]).} = - initializedGuard(g) + let identity_path_index = uint64ToIndex(g.membershipIndex.get(), 20) + for i in 0 ..< g.merkleProofCache.len div 32: + let chunk = g.merkleProofCache[i * 32 .. 
(i + 1) * 32 - 1] + path_elements.add(chunk.reversed()) - var blockTable = default(BlockTable) + let x = keccak.keccak256.digest(data) - let events = await g.getRawEvents(fromBlock, toBlock) + let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr: + return err("Failed to compute external nullifier: " & error) - if events.len == 0: - trace "no events found" - return blockTable + let witness = RLNWitnessInput( + identity_secret: identity_secret, + user_message_limit: user_message_limit, + message_id: message_id, + path_elements: path_elements, + identity_path_index: identity_path_index, + x: x, + external_nullifier: extNullifier, + ) - for event in events: - let blockNumber = parseHexInt(event["blockNumber"].getStr()).BlockNumber - let removed = event["removed"].getBool() - let parsedEventRes = parseEvent(MemberRegistered, event) - if parsedEventRes.isErr(): - error "failed to parse the MemberRegistered event", error = parsedEventRes.error() - raise newException(ValueError, "failed to parse the MemberRegistered event") - let parsedEvent = parsedEventRes.get() - blockTable.insert(blockNumber, parsedEvent, removed) + let serializedWitness = serialize(witness) - return blockTable + var input_witness_buffer = toBuffer(serializedWitness) -proc handleEvents( - g: OnchainGroupManager, blockTable: BlockTable -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) + # Generate the proof using the zerokit API + var output_witness_buffer: Buffer + let witness_success = generate_proof_with_witness( + g.rlnInstance, addr input_witness_buffer, addr output_witness_buffer + ) - for blockNumber, members in blockTable.pairs(): - try: - let startIndex = blockTable[blockNumber].filterIt(not it[1])[0][0].index - let removalIndices = members.filterIt(it[1]).mapIt(it[0].index) - let rateCommitments = members.mapIt(it[0].rateCommitment) - await g.atomicBatch( - start = startIndex, - rateCommitments = rateCommitments, - toRemoveIndices = removalIndices, - ) - g.latestIndex = startIndex + MembershipIndex(rateCommitments.len) - trace "new members added to the Merkle tree", - commitments = rateCommitments.mapIt(it.inHex) - except CatchableError: - error "failed to insert members into the tree", error = getCurrentExceptionMsg() - raise newException(ValueError, "failed to insert members into the tree") + if not witness_success: + return err("Failed to generate proof") - return + # Parse the proof into a RateLimitProof object + var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`) + let proofBytes: array[320, byte] = proofValue[] -proc handleRemovedEvents( - g: OnchainGroupManager, blockTable: BlockTable -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) + ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] + let + proofOffset = 128 + rootOffset = proofOffset + 32 + externalNullifierOffset = rootOffset + 32 + shareXOffset = externalNullifierOffset + 32 + shareYOffset = shareXOffset + 32 + nullifierOffset = shareYOffset + 32 - # count number of blocks that have been removed - var numRemovedBlocks: uint = 0 - for blockNumber, members in blockTable.pairs(): - if members.anyIt(it[1]): - numRemovedBlocks += 1 + var + zkproof: ZKSNARK + proofRoot, shareX, shareY: MerkleNode + externalNullifier: ExternalNullifier + nullifier: Nullifier - await g.backfillRootQueue(numRemovedBlocks) + discard zkproof.copyFrom(proofBytes[0 .. 
proofOffset - 1]) + discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1]) + discard + externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1]) + discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1]) + discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1]) + discard nullifier.copyFrom(proofBytes[shareYOffset .. nullifierOffset - 1]) -proc getAndHandleEvents( - g: OnchainGroupManager, fromBlock: BlockNumber, toBlock: BlockNumber -): Future[bool] {.async: (raises: [Exception]).} = - initializedGuard(g) - let blockTable = await g.getBlockTable(fromBlock, toBlock) - try: - await g.handleEvents(blockTable) - await g.handleRemovedEvents(blockTable) - except CatchableError: - error "failed to handle events", error = getCurrentExceptionMsg() - raise newException(ValueError, "failed to handle events") + # Create the RateLimitProof object + let output = RateLimitProof( + proof: zkproof, + merkleRoot: proofRoot, + externalNullifier: externalNullifier, + epoch: epoch, + rlnIdentifier: rlnIdentifier, + shareX: shareX, + shareY: shareY, + nullifier: nullifier, + ) - g.latestProcessedBlock = toBlock - return true + debug "Proof generated successfully" -proc runInInterval(g: OnchainGroupManager, cb: proc, interval: Duration) = - g.blockFetchingActive = false + waku_rln_remaining_proofs_per_epoch.dec() + waku_rln_total_generated_proofs.inc() + return ok(output) - proc runIntervalLoop() {.async, gcsafe.} = - g.blockFetchingActive = true +method verifyProof*( + g: OnchainGroupManager, # verifier context + input: seq[byte], # raw message data (signal) + proof: RateLimitProof, # proof received from the peer +): GroupManagerResult[bool] {.gcsafe, raises: [].} = + ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots -- - while g.blockFetchingActive: - var retCb: bool - g.retryWrapper(retCb, "Failed to run the interval block fetching loop"): - await cb() - await sleepAsync(interval) + var normalizedProof = proof - # using asyncSpawn is OK here since - # we make use of the error handling provided by - # OnFatalErrorHandler - asyncSpawn runIntervalLoop() + normalizedProof.externalNullifier = poseidon( + @[@(proof.epoch), @(proof.rlnIdentifier)] + ).valueOr: + return err("Failed to compute external nullifier: " & error) -proc getNewBlockCallback(g: OnchainGroupManager): proc = - let ethRpc = g.ethRpc.get() - proc wrappedCb(): Future[bool] {.async, gcsafe.} = - var latestBlock: BlockNumber - g.retryWrapper(latestBlock, "Failed to get the latest block number"): - cast[BlockNumber](await ethRpc.provider.eth_blockNumber()) + let proofBytes = serialize(normalizedProof, input) + let proofBuffer = proofBytes.toBuffer() - if latestBlock <= g.latestProcessedBlock: - return - # get logs from the last block - # inc by 1 to prevent double processing - let fromBlock = g.latestProcessedBlock + 1 - var handleBlockRes: bool - g.retryWrapper(handleBlockRes, "Failed to handle new block"): - await g.getAndHandleEvents(fromBlock, latestBlock) + let rootsBytes = serialize(g.validRoots.items().toSeq()) + let rootsBuffer = rootsBytes.toBuffer() - # cannot use isOkOr here because results in a compile-time error that - # shows the error is void for some reason - let setMetadataRes = g.setMetadata() - if setMetadataRes.isErr(): - error "failed to persist rln metadata", error = setMetadataRes.error + var validProof: bool # out-param + let ffiOk = verify_with_roots( + g.rlnInstance, # RLN context created at init() + addr proofBuffer, # 
(proof + signal) + addr rootsBuffer, # valid Merkle roots + addr validProof # will be set by the FFI call + , + ) - return handleBlockRes + if not ffiOk: + return err("could not verify the proof") + else: + trace "Proof verified successfully !" - return wrappedCb - -proc startListeningToEvents( - g: OnchainGroupManager -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) - - let ethRpc = g.ethRpc.get() - let newBlockCallback = g.getNewBlockCallback() - g.runInInterval(newBlockCallback, DefaultBlockPollRate) - -proc batchAwaitBlockHandlingFuture( - g: OnchainGroupManager, futs: seq[Future[bool]] -): Future[void] {.async: (raises: [Exception]).} = - for fut in futs: - try: - var handleBlockRes: bool - g.retryWrapper(handleBlockRes, "Failed to handle block"): - await fut - except CatchableError: - raise newException( - CatchableError, "could not fetch events from block: " & getCurrentExceptionMsg() - ) - -proc startOnchainSync( - g: OnchainGroupManager -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) - - let ethRpc = g.ethRpc.get() - - # static block chunk size - let blockChunkSize = 2_000.BlockNumber - # delay between rpc calls to not overload the rate limit - let rpcDelay = 200.milliseconds - # max number of futures to run concurrently - let maxFutures = 10 - - var fromBlock: BlockNumber = - if g.latestProcessedBlock > g.rlnContractDeployedBlockNumber: - info "syncing from last processed block", blockNumber = g.latestProcessedBlock - g.latestProcessedBlock + 1 - else: - info "syncing from rln contract deployed block", - blockNumber = g.rlnContractDeployedBlockNumber - g.rlnContractDeployedBlockNumber - - var futs = newSeq[Future[bool]]() - var currentLatestBlock: BlockNumber - g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"): - cast[BlockNumber](await ethRpc.provider.eth_blockNumber()) - - try: - # we always want to sync from last processed block => latest - # chunk events - while true: - # if the fromBlock is less than 2k blocks behind the current block - # then fetch the new toBlock - if fromBlock >= currentLatestBlock: - break - - if fromBlock + blockChunkSize > currentLatestBlock: - g.retryWrapper(currentLatestBlock, "Failed to get the latest block number"): - cast[BlockNumber](await ethRpc.provider.eth_blockNumber()) - - let toBlock = min(fromBlock + blockChunkSize, currentLatestBlock) - debug "fetching events", fromBlock = fromBlock, toBlock = toBlock - await sleepAsync(rpcDelay) - futs.add(g.getAndHandleEvents(fromBlock, toBlock)) - if futs.len >= maxFutures or toBlock == currentLatestBlock: - await g.batchAwaitBlockHandlingFuture(futs) - g.setMetadata(lastProcessedBlock = some(toBlock)).isOkOr: - error "failed to persist rln metadata", error = $error - futs = newSeq[Future[bool]]() - fromBlock = toBlock + 1 - except CatchableError: - raise newException( - CatchableError, - "failed to get the history/reconcile missed blocks: " & getCurrentExceptionMsg(), - ) - - # listen to blockheaders and contract events - try: - await g.startListeningToEvents() - except CatchableError: - raise newException( - ValueError, "failed to start listening to events: " & getCurrentExceptionMsg() - ) - -method startGroupSync*( - g: OnchainGroupManager -): Future[GroupManagerResult[void]] {.async.} = - ?resultifiedInitGuard(g) - # Get archive history - try: - await startOnchainSync(g) - return ok() - except CatchableError, Exception: - return err("failed to start group sync: " & getCurrentExceptionMsg()) + return ok(validProof) method 
onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} = g.registerCb = some(cb) @@ -609,53 +536,27 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} let metadata = metadataGetOptRes.get().get() if metadata.chainId != uint(g.chainId): return err("persisted data: chain id mismatch") - if metadata.contractAddress != g.ethContractAddress.toLower(): return err("persisted data: contract address mismatch") - g.latestProcessedBlock = metadata.lastProcessedBlock.BlockNumber - g.validRoots = metadata.validRoots.toDeque() - var deployedBlockNumber: Uint256 - g.retryWrapper( - deployedBlockNumber, - "Failed to get the deployed block number. Have you set the correct contract address?", - ): - await wakuRlnContract.deployedBlockNumber().call() - debug "using rln contract", deployedBlockNumber, rlnContractAddress = contractAddress - g.rlnContractDeployedBlockNumber = cast[BlockNumber](deployedBlockNumber) - g.latestProcessedBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber) g.rlnRelayMaxMessageLimit = cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call()) proc onDisconnect() {.async.} = error "Ethereum client disconnected" - let fromBlock = max(g.latestProcessedBlock, g.rlnContractDeployedBlockNumber) - info "reconnecting with the Ethereum client, and restarting group sync", - fromBlock = fromBlock var newEthRpc: Web3 g.retryWrapper(newEthRpc, "Failed to reconnect with the Ethereum client"): await newWeb3(g.ethClientUrl) newEthRpc.ondisconnect = ethRpc.ondisconnect g.ethRpc = some(newEthRpc) - try: - await g.startOnchainSync() - except CatchableError, Exception: - g.onFatalErrorAction( - "failed to restart group sync" & ": " & getCurrentExceptionMsg() - ) - ethRpc.ondisconnect = proc() = asyncSpawn onDisconnect() - waku_rln_number_registered_memberships.set(int64(g.rlnInstance.leavesSet())) g.initialized = true - return ok() method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} = - g.blockFetchingActive = false - if g.ethRpc.isSome(): g.ethRpc.get().ondisconnect = nil await g.ethRpc.get().close() @@ -665,26 +566,13 @@ method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} = g.initialized = false -proc isSyncing*(g: OnchainGroupManager): Future[bool] {.async, gcsafe.} = - let ethRpc = g.ethRpc.get() - - var syncing: SyncingStatus - g.retryWrapper(syncing, "Failed to get the syncing status"): - await ethRpc.provider.eth_syncing() - return syncing.syncing - method isReady*(g: OnchainGroupManager): Future[bool] {.async.} = initializedGuard(g) if g.ethRpc.isNone(): return false - var currentBlock: BlockNumber - g.retryWrapper(currentBlock, "Failed to get the current block number"): - cast[BlockNumber](await g.ethRpc.get().provider.eth_blockNumber()) - - # the node is still able to process messages if it is behind the latest block by a factor of the valid roots - if u256(g.latestProcessedBlock.uint64) < (u256(currentBlock) - u256(g.validRoots.len)): + if g.wakuRlnContract.isNone(): return false - return not (await g.isSyncing()) + return true diff --git a/waku/waku_rln_relay/protocol_metrics.nim b/waku/waku_rln_relay/protocol_metrics.nim index 121727809..2210328f4 100644 --- a/waku/waku_rln_relay/protocol_metrics.nim +++ b/waku/waku_rln_relay/protocol_metrics.nim @@ -85,6 +85,7 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger = var cumulativeProofsVerified = 0.float64 var cumulativeProofsGenerated = 0.float64 var cumulativeProofsRemaining = 100.float64 + var cumulativeRegisteredMember = 0.float64 when 
defined(metrics): logMetrics = proc() = @@ -107,6 +108,9 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger = let freshProofsRemainingCount = parseAndAccumulate( waku_rln_remaining_proofs_per_epoch, cumulativeProofsRemaining ) + let freshRegisteredMemberCount = parseAndAccumulate( + waku_rln_number_registered_memberships, cumulativeRegisteredMember + ) info "Total messages", count = freshMsgCount info "Total spam messages", count = freshSpamCount @@ -116,5 +120,6 @@ proc getRlnMetricsLogger*(): RLNMetricsLogger = info "Total proofs verified", count = freshProofsVerifiedCount info "Total proofs generated", count = freshProofsGeneratedCount info "Total proofs remaining", count = freshProofsRemainingCount + info "Total registered members", count = freshRegisteredMemberCount return logMetrics diff --git a/waku/waku_rln_relay/protocol_types.nim b/waku/waku_rln_relay/protocol_types.nim index 97b1c34ea..c6f52e00b 100644 --- a/waku/waku_rln_relay/protocol_types.nim +++ b/waku/waku_rln_relay/protocol_types.nim @@ -52,6 +52,20 @@ type RateLimitProof* = object ## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier])) externalNullifier*: ExternalNullifier +type UInt40* = StUint[40] +type UInt32* = StUint[32] + +type + Field = array[32, byte] # Field element representation (256 bits) + RLNWitnessInput* = object + identity_secret*: Field + user_message_limit*: Field + message_id*: Field + path_elements*: seq[byte] + identity_path_index*: seq[byte] + x*: Field + external_nullifier*: Field + type ProofMetadata* = object nullifier*: Nullifier shareX*: MerkleNode diff --git a/waku/waku_rln_relay/rln/rln_interface.nim b/waku/waku_rln_relay/rln/rln_interface.nim index cc468b124..27b3bbee9 100644 --- a/waku/waku_rln_relay/rln/rln_interface.nim +++ b/waku/waku_rln_relay/rln/rln_interface.nim @@ -130,6 +130,21 @@ proc generate_proof*( ## integers wrapped in <> indicate value sizes in bytes ## the return bool value indicates the success or failure of the operation +proc generate_proof_with_witness*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "generate_rln_proof_with_witness".} + +## rln-v2 +## "witness" term refer to collection of secret inputs with proper serialization +## input_buffer has to be serialized as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements> | identity_path_index> | x<32> | external_nullifier<32> ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] +## rln-v1 +## input_buffer has to be serialized as [ id_key<32> | path_elements> | identity_path_index> | x<32> | epoch<32> | rln_identifier<32> ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ] +## integers wrapped in <> indicate value sizes in bytes +## path_elements and identity_path_index serialize a merkle proof and are vectors of elements of 32 and 1 bytes respectively +## the return bool value indicates the success or failure of the operation + proc verify*( ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool ): bool {.importc: "verify_rln_proof".} diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index 268f1c93d..a42b04f8b 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -98,6 +98,7 @@ type WakuRLNRelay* 
= ref object of RootObj onFatalErrorAction*: OnFatalErrorHandler nonceManager*: NonceManager epochMonitorFuture*: Future[void] + rootChangesFuture*: Future[void] proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch = ## gets time `t` as `flaot64` with subseconds resolution in the fractional part @@ -252,6 +253,7 @@ proc validateMessage*( waku_rln_errors_total.inc(labelValues = ["proof_verification"]) warn "invalid message: proof verification failed", payloadLen = msg.payload.len return MessageValidationResult.Invalid + if not proofVerificationRes.value(): # invalid proof warn "invalid message: invalid proof", payloadLen = msg.payload.len @@ -467,9 +469,6 @@ proc mount( # Initialize the groupManager (await groupManager.init()).isOkOr: return err("could not initialize the group manager: " & $error) - # Start the group sync - (await groupManager.startGroupSync()).isOkOr: - return err("could not start the group sync: " & $error) wakuRlnRelay = WakuRLNRelay( groupManager: groupManager, @@ -479,6 +478,11 @@ proc mount( onFatalErrorAction: conf.onFatalErrorAction, ) + # track root changes on smart contract merkle tree + if groupManager of OnchainGroupManager: + let onchainManager = cast[OnchainGroupManager](groupManager) + wakuRlnRelay.rootChangesFuture = onchainManager.trackRootChanges() + # Start epoch monitoring in the background wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay) return ok(wakuRlnRelay) From 42ab866f2c2ebbaf0f5c16e9d4633da9206dc3de Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Mon, 12 May 2025 10:57:13 +0200 Subject: [PATCH 40/48] chore: allow multiple rln eth clients (#3402) * use of multiple Eth clients instead of just one * config_chat2 enhance param comment * group_manager: raise exception if could not connect to any of the eth clients --- apps/chat2/chat2.nim | 3 +- apps/chat2/config_chat2.nim | 12 +++--- apps/networkmonitor/networkmonitor.nim | 2 +- apps/networkmonitor/networkmonitor_config.nim | 11 ++--- tests/factory/test_external_config.nim | 4 +- .../test_rln_group_manager_onchain.nim | 4 +- tests/waku_rln_relay/utils_onchain.nim | 8 ++-- .../rln_keystore_generator.nim | 6 +-- .../conf_builder/rln_relay_conf_builder.nim | 13 +++--- waku/factory/external_config.nim | 17 ++++---- waku/factory/node_factory.nim | 2 +- waku/factory/waku_conf.nim | 6 +-- .../group_manager/on_chain/group_manager.nim | 40 +++++++++++++++---- waku/waku_rln_relay/rln_relay.nim | 4 +- 14 files changed, 80 insertions(+), 52 deletions(-) diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index c25ce86d4..127a761c0 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -560,8 +560,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = dynamic: conf.rlnRelayDynamic, credIndex: conf.rlnRelayCredIndex, chainId: conf.rlnRelayChainId, - ethContractAddress: conf.rlnRelayEthContractAddress, - ethClientAddress: string(conf.rlnRelayethClientAddress), + ethClientUrls: conf.ethClientUrls.mapIt(string(it)), creds: some( RlnRelayCreds( path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword diff --git a/apps/chat2/config_chat2.nim b/apps/chat2/config_chat2.nim index 830222cd9..8cc525208 100644 --- a/apps/chat2/config_chat2.nim +++ b/apps/chat2/config_chat2.nim @@ -18,7 +18,8 @@ type prod test - EthRpcUrl = distinct string + EthRpcUrl* = distinct string + Chat2Conf* = object ## General node config logLevel* {. 
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level" @@ -248,11 +249,12 @@ type name: "rln-relay-id-commitment-key" .}: string - rlnRelayEthClientAddress* {. - desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/", + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: newSeq[EthRpcUrl](0), name: "rln-relay-eth-client-address" - .}: EthRpcUrl + .}: seq[EthRpcUrl] rlnRelayEthContractAddress* {. desc: "Address of membership contract on an Ethereum testnet", diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index c8b8fe092..7b71a630e 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -638,7 +638,7 @@ when isMainModule: dynamic: conf.rlnRelayDynamic, credIndex: some(uint(0)), ethContractAddress: conf.rlnRelayEthContractAddress, - ethClientAddress: string(conf.rlnRelayethClientAddress), + ethClientUrls: conf.ethClientUrls.mapIt(string(it)), treePath: conf.rlnRelayTreePath, epochSizeSec: conf.rlnEpochSizeSec, creds: none(RlnRelayCreds), diff --git a/apps/networkmonitor/networkmonitor_config.nim b/apps/networkmonitor/networkmonitor_config.nim index bf1662649..04245f9dd 100644 --- a/apps/networkmonitor/networkmonitor_config.nim +++ b/apps/networkmonitor/networkmonitor_config.nim @@ -8,7 +8,7 @@ import stew/shims/net, regex -type EthRpcUrl = distinct string +type EthRpcUrl* = distinct string type NetworkMonitorConf* = object logLevel* {. @@ -82,11 +82,12 @@ type NetworkMonitorConf* = object name: "rln-relay-tree-path" .}: string - rlnRelayEthClientAddress* {. - desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/", + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: newSeq[EthRpcUrl](0), name: "rln-relay-eth-client-address" - .}: EthRpcUrl + .}: seq[EthRpcUrl] rlnRelayEthContractAddress* {. 
desc: "Address of membership contract on an Ethereum testnet", diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim index 1caeb6e7b..5bd4e2c86 100644 --- a/tests/factory/test_external_config.nim +++ b/tests/factory/test_external_config.nim @@ -26,7 +26,7 @@ suite "Waku config - apply preset": cmd: noCommand, preset: "twn", relay: true, - rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl, + ethClientUrls: @["http://someaddress".EthRpcUrl], rlnRelayTreePath: "/tmp/sometreepath", ) @@ -109,7 +109,7 @@ suite "Waku config - apply preset": cmd: noCommand, clusterId: 1.uint16, relay: true, - rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl, + ethClientUrls: @["http://someaddress".EthRpcUrl], rlnRelayTreePath: "/tmp/sometreepath", ) diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index 25a3166ce..7ba64e39b 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -77,10 +77,10 @@ suite "Onchain group manager": assert metadata.contractAddress == manager.ethContractAddress, "contractAddress is not equal to " & manager.ethContractAddress - let differentContractAddress = await uploadRLNContract(manager.ethClientUrl) + let differentContractAddress = await uploadRLNContract(manager.ethClientUrls[0]) # simulating a change in the contractAddress let manager2 = OnchainGroupManager( - ethClientUrl: EthClient, + ethClientUrls: @[EthClient], ethContractAddress: $differentContractAddress, rlnInstance: manager.rlnInstance, onFatalErrorAction: proc(errStr: string) = diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim index 82eaf085e..433f865c4 100644 --- a/tests/waku_rln_relay/utils_onchain.nim +++ b/tests/waku_rln_relay/utils_onchain.nim @@ -250,7 +250,7 @@ proc stopAnvil*(runAnvil: Process) {.used.} = error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg() proc setupOnchainGroupManager*( - ethClientAddress: string = EthClient, amountEth: UInt256 = 10.u256 + ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 ): Future[OnchainGroupManager] {.async.} = let rlnInstanceRes = createRlnInstance(tree_path = genTempPath("rln_tree", "group_manager_onchain")) @@ -259,9 +259,9 @@ proc setupOnchainGroupManager*( let rlnInstance = rlnInstanceRes.get() - let contractAddress = await uploadRLNContract(ethClientAddress) + let contractAddress = await uploadRLNContract(ethClientUrl) # connect to the eth client - let web3 = await newWeb3(ethClientAddress) + let web3 = await newWeb3(ethClientUrl) let accounts = await web3.provider.eth_accounts() web3.defaultAccount = accounts[0] @@ -275,7 +275,7 @@ proc setupOnchainGroupManager*( ) let manager = OnchainGroupManager( - ethClientUrl: ethClientAddress, + ethClientUrls: @[ethClientUrl], ethContractAddress: $contractAddress, chainId: CHAIN_ID, ethPrivateKey: some($privateKey), diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim index 0ca1c9968..cd501e52d 100644 --- a/tools/rln_keystore_generator/rln_keystore_generator.nim +++ b/tools/rln_keystore_generator/rln_keystore_generator.nim @@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import chronicles, results, std/tempfiles +import chronicles, results, std/[tempfiles, sequtils] import waku/[ @@ -19,7 +19,7 @@ logScope: type RlnKeystoreGeneratorConf* = object 
execute*: bool ethContractAddress*: string - ethClientAddress*: string + ethClientUrls*: seq[string] chainId*: uint credPath*: string credPassword*: string @@ -65,7 +65,7 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = # 4. initialize OnchainGroupManager let groupManager = OnchainGroupManager( - ethClientUrl: string(conf.ethClientAddress), + ethClientUrls: conf.ethClientUrls, chainId: conf.chainId, ethContractAddress: conf.ethContractAddress, rlnInstance: rlnInstance, diff --git a/waku/factory/conf_builder/rln_relay_conf_builder.nim b/waku/factory/conf_builder/rln_relay_conf_builder.nim index ff126d058..ea87eb278 100644 --- a/waku/factory/conf_builder/rln_relay_conf_builder.nim +++ b/waku/factory/conf_builder/rln_relay_conf_builder.nim @@ -9,9 +9,8 @@ logScope: ############################## type RlnRelayConfBuilder* = object enabled*: Option[bool] - chainId*: Option[uint] - ethClientAddress*: Option[string] + ethClientUrls*: Option[seq[string]] ethContractAddress*: Option[string] credIndex*: Option[uint] credPassword*: Option[string] @@ -42,8 +41,8 @@ proc withCredPath*(b: var RlnRelayConfBuilder, credPath: string) = proc withDynamic*(b: var RlnRelayConfBuilder, dynamic: bool) = b.dynamic = some(dynamic) -proc withEthClientAddress*(b: var RlnRelayConfBuilder, ethClientAddress: string) = - b.ethClientAddress = some(ethClientAddress) +proc withEthClientUrls*(b: var RlnRelayConfBuilder, ethClientUrls: seq[string]) = + b.ethClientUrls = some(ethClientUrls) proc withEthContractAddress*(b: var RlnRelayConfBuilder, ethContractAddress: string) = b.ethContractAddress = some(ethContractAddress) @@ -76,8 +75,8 @@ proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] = if b.dynamic.isNone(): return err("rlnRelay.dynamic is not specified") - if b.ethClientAddress.get("") == "": - return err("rlnRelay.ethClientAddress is not specified") + if b.ethClientUrls.get(newSeq[string](0)).len == 0: + return err("rlnRelay.ethClientUrls is not specified") if b.ethContractAddress.get("") == "": return err("rlnRelay.ethContractAddress is not specified") if b.epochSizeSec.isNone(): @@ -94,7 +93,7 @@ proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] = credIndex: b.credIndex, creds: creds, dynamic: b.dynamic.get(), - ethClientAddress: b.ethClientAddress.get(), + ethClientUrls: b.ethClientUrls.get(), ethContractAddress: b.ethContractAddress.get(), epochSizeSec: b.epochSizeSec.get(), userMessageLimit: b.userMessageLimit.get(), diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index 76b52b20b..ba0785f01 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -1,5 +1,5 @@ import - std/[strutils, strformat], + std/[strutils, strformat, sequtils], results, chronicles, chronos, @@ -75,11 +75,12 @@ type WakuNodeConf* = object name: "rln-relay-cred-path" .}: string - rlnRelayEthClientAddress* {. - desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/", - defaultValue: "http://localhost:8540/", + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: @[EthRpcUrl("http://localhost:8540/")], name: "rln-relay-eth-client-address" - .}: EthRpcUrl + .}: seq[EthRpcUrl] rlnRelayEthContractAddress* {. 
desc: "Address of membership contract on an Ethereum testnet.", @@ -867,7 +868,7 @@ proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf = RlnKeystoreGeneratorConf( execute: n.execute, chainId: n.rlnRelayChainId, - ethClientAddress: n.rlnRelayEthClientAddress.string, + ethClientUrls: n.ethClientUrls.mapIt(string(it)), ethContractAddress: n.rlnRelayEthContractAddress, userMessageLimit: n.rlnRelayUserMessageLimit, ethPrivateKey: n.rlnRelayEthPrivateKey, @@ -907,8 +908,8 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.rlnRelayConf.withCredPath(n.rlnRelayCredPath) if n.rlnRelayCredPassword != "": b.rlnRelayConf.withCredPassword(n.rlnRelayCredPassword) - if n.rlnRelayEthClientAddress.string != "": - b.rlnRelayConf.withEthClientAddress(n.rlnRelayEthClientAddress.string) + if n.ethClientUrls.len > 0: + b.rlnRelayConf.withEthClientUrls(n.ethClientUrls.mapIt(string(it))) if n.rlnRelayEthContractAddress != "": b.rlnRelayConf.withEthContractAddress(n.rlnRelayEthContractAddress) diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 1b8f8e59b..7df5c2567 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -354,7 +354,7 @@ proc setupProtocols( credIndex: rlnRelayConf.credIndex, ethContractAddress: rlnRelayConf.ethContractAddress, chainId: rlnRelayConf.chainId, - ethClientAddress: rlnRelayConf.ethClientAddress, + ethClientUrls: rlnRelayConf.ethClientUrls, creds: rlnRelayConf.creds, treePath: rlnRelayConf.treePath, userMessageLimit: rlnRelayConf.userMessageLimit, diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim index 766a17aa8..9f5160135 100644 --- a/waku/factory/waku_conf.nim +++ b/waku/factory/waku_conf.nim @@ -160,7 +160,7 @@ proc logConf*(conf: WakuConf) = maxMessageSize = conf.maxMessageSizeBytes, rlnEpochSizeSec = rlnRelayConf.epochSizeSec, rlnRelayUserMessageLimit = rlnRelayConf.userMessageLimit, - rlnRelayEthClientAddress = string(rlnRelayConf.ethClientAddress) + ethClientUrls = rlnRelayConf.ethClientUrls proc validateNodeKey(wakuConf: WakuConf): Result[void, string] = wakuConf.nodeKey.getPublicKey().isOkOr: @@ -224,8 +224,8 @@ proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] = if isEmptyOrWhiteSpace(rlnRelayConf.treePath): return err("rlnRelayConf.treepath is an empty string") - if isEmptyOrWhiteSpace(rlnRelayConf.ethClientAddress): - return err("rlnRelayConf.ethClientAddress is an empty string") + if rlnRelayConf.ethClientUrls.len == 0: + return err("rlnRelayConf.ethClientUrls is empty") if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress): return err("rlnRelayConf.ethContractAddress is an empty string") diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index fe3db9102..61c9948ee 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -51,7 +51,7 @@ contract(WakuRlnContract): type WakuRlnContractWithSender = Sender[WakuRlnContract] OnchainGroupManager* = ref object of GroupManager - ethClientUrl*: string + ethClientUrls*: seq[string] ethPrivateKey*: Option[string] ethContractAddress*: string ethRpc*: Option[Web3] @@ -458,11 +458,35 @@ method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} = method onWithdraw*(g: OnchainGroupManager, cb: OnWithdrawCallback) {.gcsafe.} = g.withdrawCb = some(cb) +proc establishConnection( + g: OnchainGroupManager +): 
Future[GroupManagerResult[Web3]] {.async.} = + var ethRpc: Web3 + + g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"): + var innerEthRpc: Web3 + var connected = false + for clientUrl in g.ethClientUrls: + ## We give a chance to the user to provide multiple clients + ## and we try to connect to each of them + try: + innerEthRpc = await newWeb3(clientUrl) + connected = true + break + except CatchableError: + error "failed connect Eth client", error = getCurrentExceptionMsg() + + if not connected: + raise newException(CatchableError, "all failed") + + innerEthRpc + + return ok(ethRpc) + method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} = # check if the Ethereum client is reachable - var ethRpc: Web3 - g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"): - await newWeb3(g.ethClientUrl) + let ethRpc: Web3 = (await establishConnection(g)).valueOr: + return err("failed to connect to Ethereum clients: " & $error) var fetchedChainId: uint g.retryWrapper(fetchedChainId, "Failed to get the chain id"): @@ -544,9 +568,11 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} proc onDisconnect() {.async.} = error "Ethereum client disconnected" - var newEthRpc: Web3 - g.retryWrapper(newEthRpc, "Failed to reconnect with the Ethereum client"): - await newWeb3(g.ethClientUrl) + + var newEthRpc: Web3 = (await g.establishConnection()).valueOr: + g.onFatalErrorAction("failed to connect to Ethereum clients onDisconnect") + return + newEthRpc.ondisconnect = ethRpc.ondisconnect g.ethRpc = some(newEthRpc) diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index a42b04f8b..20f5b7b24 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -43,7 +43,7 @@ type RlnRelayConf* = object of RootObj dynamic*: bool credIndex*: Option[uint] ethContractAddress*: string - ethClientAddress*: string + ethClientUrls*: seq[string] chainId*: uint creds*: Option[RlnRelayCreds] treePath*: string @@ -455,7 +455,7 @@ proc mount( (none(string), none(string)) groupManager = OnchainGroupManager( - ethClientUrl: string(conf.ethClientAddress), + ethClientUrls: conf.ethClientUrls, ethContractAddress: $conf.ethContractAddress, chainId: conf.chainId, rlnInstance: rlnInstance, From 094a68e41d40cd3f863e2748691bb3fce99d8fb6 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Mon, 12 May 2025 15:23:19 +0200 Subject: [PATCH 41/48] fix: addPeer could unintentionally override metadata of previously stored peer with defaults and empty (#3403) * fix: addPeer could unintentionally override metadata of previously stored peer with defaults and empty * Add explanation why we discard updates of different peerStore books. 
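The fix relies on `hasKeyOrPut` only writing a value when the key is absent, so a later addPeer call can no longer replace already-stored metadata with empty defaults. A minimal sketch against a plain std/tables Table (the peer store books wrap a comparable mapping; the key and agent strings here are illustrative only, not taken from the code):

  import std/tables

  var agentBook = initTable[string, string]()
  # First call: key absent, so the value is stored and `false` is returned.
  discard agentBook.hasKeyOrPut("peerA", "nwaku")
  # Second call: key already present, so the empty default is discarded and
  # the stored value stays untouched; `true` is returned.
  discard agentBook.hasKeyOrPut("peerA", "")
  assert agentBook["peerA"] == "nwaku"

Because the second call leaves the existing entry in place, only the books that must always reflect the latest observation (for example SourceBook when the origin is known) are still assigned directly.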
Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> --------- Co-authored-by: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> --- waku/node/peer_manager/waku_peer_store.nim | 27 ++++++++++++++-------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/waku/node/peer_manager/waku_peer_store.nim b/waku/node/peer_manager/waku_peer_store.nim index 777e4f2be..ee339e858 100644 --- a/waku/node/peer_manager/waku_peer_store.nim +++ b/waku/node/peer_manager/waku_peer_store.nim @@ -100,16 +100,23 @@ proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin protos.add($new_proto) peerStore[ProtoBook][peer.peerId] = protos - peerStore[AgentBook][peer.peerId] = peer.agent - peerStore[ProtoVersionBook][peer.peerId] = peer.protoVersion - peerStore[KeyBook][peer.peerId] = peer.publicKey - peerStore[ConnectionBook][peer.peerId] = peer.connectedness - peerStore[DisconnectBook][peer.peerId] = peer.disconnectTime - peerStore[SourceBook][peer.peerId] = - if origin != UnknownOrigin: origin else: peer.origin - peerStore[DirectionBook][peer.peerId] = peer.direction - peerStore[LastFailedConnBook][peer.peerId] = peer.lastFailedConn - peerStore[NumberFailedConnBook][peer.peerId] = peer.numberFailedConn + ## We don't care whether the item was already present in the table or not. Hence, we always discard the hasKeyOrPut's bool returned value + discard peerStore[AgentBook].book.hasKeyOrPut(peer.peerId, peer.agent) + discard peerStore[ProtoVersionBook].book.hasKeyOrPut(peer.peerId, peer.protoVersion) + discard peerStore[KeyBook].book.hasKeyOrPut(peer.peerId, peer.publicKey) + + discard peerStore[ConnectionBook].book.hasKeyOrPut(peer.peerId, peer.connectedness) + discard peerStore[DisconnectBook].book.hasKeyOrPut(peer.peerId, peer.disconnectTime) + if origin != UnknownOrigin: + peerStore[SourceBook][peer.peerId] = origin + else: + discard peerStore[SourceBook].book.hasKeyOrPut(peer.peerId, peer.origin) + + discard peerStore[DirectionBook].book.hasKeyOrPut(peer.peerId, peer.direction) + discard + peerStore[LastFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.lastFailedConn) + discard + peerStore[NumberFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.numberFailedConn) if peer.enr.isSome(): peerStore[ENRBook][peer.peerId] = peer.enr.get() From b435b51c4ef8bfae8d045164456b0a4b4d7f8f22 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Tue, 13 May 2025 09:13:28 +0200 Subject: [PATCH 42/48] chore: Enhance feedback on error cli (#3405) * better error detail * rm duplicated block --- waku/discovery/waku_discv5.nim | 16 ++++----- waku/factory/external_config.nim | 2 +- waku/factory/waku.nim | 6 ++-- waku/factory/waku_conf.nim | 34 ++++++++----------- .../group_manager/on_chain/group_manager.nim | 3 -- 5 files changed, 26 insertions(+), 35 deletions(-) diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim index 0c57eb384..221acef42 100644 --- a/waku/discovery/waku_discv5.nim +++ b/waku/discovery/waku_discv5.nim @@ -134,13 +134,13 @@ proc updateENRShards( ): Result[void, string] = ## Add or remove shards from the Discv5 ENR let newShardOp = topicsToRelayShards(newTopics).valueOr: - return err("ENR update failed: " & error) + return err("ENR update failed topicsToRelayShards: " & error) let newShard = newShardOp.valueOr: return ok() let typedRecord = wd.protocol.localNode.record.toTyped().valueOr: - return err("ENR update failed: " & $error) + return err("ENR update failed toTyped: " & 
$error) let currentShardsOp = typedRecord.relaySharding() @@ -149,17 +149,17 @@ proc updateENRShards( let currentShard = currentShardsOp.get() if currentShard.clusterId != newShard.clusterId: - return err("ENR update failed: clusterId id mismatch") + return err("ENR update failed: clusterId id mismatch in add") RelayShards.init( currentShard.clusterId, currentShard.shardIds & newShard.shardIds ).valueOr: - return err("ENR update failed: " & error) + return err("ENR update failed RelayShards.init in add: " & error) elif not add and currentShardsOp.isSome(): let currentShard = currentShardsOp.get() if currentShard.clusterId != newShard.clusterId: - return err("ENR update failed: clusterId id mismatch") + return err("ENR update failed: clusterId id mismatch in not add") let currentSet = toHashSet(currentShard.shardIds) let newSet = toHashSet(newShard.shardIds) @@ -170,7 +170,7 @@ proc updateENRShards( return err("ENR update failed: cannot remove all shards") RelayShards.init(currentShard.clusterId, indices).valueOr: - return err("ENR update failed: " & error) + return err("ENR update failed RelayShards.init in not add: " & error) elif add and currentShardsOp.isNone(): newShard else: @@ -181,12 +181,12 @@ proc updateENRShards( (ShardingBitVectorEnrField, resultShard.toBitVector()) else: let list = resultShard.toIndicesList().valueOr: - return err("ENR update failed: " & $error) + return err("ENR update failed toIndicesList: " & $error) (ShardingIndicesListEnrField, list) wd.protocol.updateRecord([(field, value)]).isOkOr: - return err("ENR update failed: " & $error) + return err("ENR update failed updateRecord: " & $error) return ok() diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index ba0785f01..800b61e63 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -70,7 +70,7 @@ type WakuNodeConf* = object .}: logging.LogFormat rlnRelayCredPath* {. 
- desc: "The path for peristing rln-relay credential", + desc: "The path for persisting rln-relay credential", defaultValue: "", name: "rln-relay-cred-path" .}: string diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index 55d311963..01dc7a36f 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -177,7 +177,7 @@ proc new*( var deliveryMonitor: DeliveryMonitor if wakuConf.p2pReliability: if wakuConf.remoteStoreNode.isNone(): - return err("A remoteStoreNode should be set when reliability mode is on") + return err("A storenode should be set when reliability mode is on") let deliveryMonitorRes = DeliveryMonitor.new( node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient, @@ -222,7 +222,7 @@ proc getPorts( proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] = var conf = waku[].conf let (tcpPort, websocketPort) = getPorts(waku[].node.switch.peerInfo.listenAddrs).valueOr: - return err("Could not retrieve ports " & error) + return err("Could not retrieve ports: " & error) if tcpPort.isSome(): conf.networkConf.p2pTcpPort = tcpPort.get() @@ -246,7 +246,7 @@ proc updateEnr(waku: ptr Waku): Result[void, string] = return err("ENR setup failed: " & error) if isClusterMismatched(record, waku[].conf.clusterId): - return err("cluster id mismatch configured shards") + return err("cluster-id mismatch configured shards") waku[].node.enr = record diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim index 9f5160135..94b89a26e 100644 --- a/waku/factory/waku_conf.nim +++ b/waku/factory/waku_conf.nim @@ -164,7 +164,7 @@ proc logConf*(conf: WakuConf) = proc validateNodeKey(wakuConf: WakuConf): Result[void, string] = wakuConf.nodeKey.getPublicKey().isOkOr: - return err("Node key is invalid") + return err("nodekey param is invalid") return ok() proc validateShards(wakuConf: WakuConf): Result[void, string] = @@ -187,58 +187,52 @@ proc validateShards(wakuConf: WakuConf): Result[void, string] = proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] = if wakuConf.networkConf.dns4DomainName.isSome() and isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string): - return err("dns4DomainName is an empty string, set it to none(string) instead") + return err("dns4-domain-name is an empty string, set it to none(string) instead") if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio): - return err("relayServiceRatio is an empty string") + return err("relay-service-ratio is an empty string") for sn in wakuConf.staticNodes: if isEmptyOrWhiteSpace(sn): - return err("staticNodes contain an empty string") + return err("staticnode contain an empty string") if wakuConf.remoteStoreNode.isSome() and isEmptyOrWhiteSpace(wakuConf.remoteStoreNode.get()): - return err("remoteStoreNode is an empty string, set it to none(string) instead") + return err("storenode is an empty string, set it to none(string) instead") if wakuConf.remoteLightPushNode.isSome() and isEmptyOrWhiteSpace(wakuConf.remoteLightPushNode.get()): - return err("remoteLightPushNode is an empty string, set it to none(string) instead") + return err("lightpushnode is an empty string, set it to none(string) instead") if wakuConf.remotePeerExchangeNode.isSome() and isEmptyOrWhiteSpace(wakuConf.remotePeerExchangeNode.get()): - return - err("remotePeerExchangeNode is an empty string, set it to none(string) instead") + return err("peer-exchange-node is an empty string, set it to none(string) instead") if wakuConf.remoteFilterNode.isSome() and isEmptyOrWhiteSpace(wakuConf.remoteFilterNode.get()): - 
return - err("remotePeerExchangeNode is an empty string, set it to none(string) instead") + return err("filternode is an empty string, set it to none(string) instead") if wakuConf.dnsDiscoveryConf.isSome() and isEmptyOrWhiteSpace(wakuConf.dnsDiscoveryConf.get().enrTreeUrl): - return err("dnsDiscoveryConf.enrTreeUrl is an empty string") + return err("dns-discovery-url is an empty string") # TODO: rln relay config should validate itself if wakuConf.rlnRelayConf.isSome(): let rlnRelayConf = wakuConf.rlnRelayConf.get() if isEmptyOrWhiteSpace(rlnRelayConf.treePath): - return err("rlnRelayConf.treepath is an empty string") + return err("rln-relay-tree-path is an empty string") if rlnRelayConf.ethClientUrls.len == 0: - return err("rlnRelayConf.ethClientUrls is empty") + return err("rln-relay-eth-client-address is empty") if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress): - return err("rlnRelayConf.ethContractAddress is an empty string") + return err("rln-relay-eth-contract-address is an empty string") if rlnRelayConf.creds.isSome(): let creds = rlnRelayConf.creds.get() if isEmptyOrWhiteSpace(creds.path): - return err ( - "rlnRelayConf.creds.path is an empty string, set rlnRelayConf.creds it to none instead" - ) + return err ("rln-relay-cred-path is an empty string") if isEmptyOrWhiteSpace(creds.password): - return err ( - "rlnRelayConf.creds.password is an empty string, set rlnRelayConf.creds to none instead" - ) + return err ("rln-relay-cred-password is an empty string") return ok() diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index 61c9948ee..600291ecf 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -335,9 +335,6 @@ method generateProof*( let message_id = uint64ToField(messageId) var path_elements = newSeq[byte](0) - if (g.merkleProofCache.len mod 32) != 0: - return err("Invalid merkle proof cache length") - let identity_path_index = uint64ToIndex(g.membershipIndex.get(), 20) for i in 0 ..< g.merkleProofCache.len div 32: let chunk = g.merkleProofCache[i * 32 .. 
(i + 1) * 32 - 1] From 2926542fcdcbb7d3cce4bcb98d726a4881ffa5c5 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Wed, 14 May 2025 11:05:02 +0200 Subject: [PATCH 43/48] simplify libwaku error returns (#3399) --- .../requests/discovery_request.nim | 2 +- .../requests/node_lifecycle_request.nim | 6 +++--- .../requests/protocols/lightpush_request.nim | 2 +- .../requests/protocols/relay_request.nim | 19 ++++++++----------- 4 files changed, 13 insertions(+), 16 deletions(-) diff --git a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim index 078a43030..4eb193728 100644 --- a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/discovery_request.nim @@ -143,7 +143,7 @@ proc process*( of PEER_EXCHANGE: let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr: error "PEER_EXCHANGE failed", error = error - return err("error calling performPeerExchangeRequestTo: " & $error) + return err($error) return ok($numValidPeers) error "discovery request not handled" diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim index 8a874b681..8d504df89 100644 --- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim @@ -92,16 +92,16 @@ proc process*( of CREATE_NODE: waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr: error "CREATE_NODE failed", error = error - return err("error processing createWaku request: " & $error) + return err($error) of START_NODE: (await waku.startWaku()).isOkOr: error "START_NODE failed", error = error - return err("problem starting waku: " & $error) + return err($error) of STOP_NODE: try: await waku[].stop() except Exception: error "STOP_NODE failed", error = getCurrentExceptionMsg() - return err("exception stopping node: " & getCurrentExceptionMsg()) + return err(getCurrentExceptionMsg()) return ok("") diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim index e7006ad06..f167cd239 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim @@ -104,6 +104,6 @@ proc process*( ) ).valueOr: error "PUBLISH failed", error = error - return err("LightpushRequest error publishing: " & $error) + return err($error) return ok(msgHashHex) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim index 6a437122a..c2f002c44 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim @@ -113,28 +113,25 @@ proc process*( (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic), handler = some(self.relayEventCallback), ).isOkOr: - let errorMsg = "Subscribe failed:" & $error - error "SUBSCRIBE failed", error = errorMsg - return err(errorMsg) + error 
"SUBSCRIBE failed", error + return err($error) of UNSUBSCRIBE: waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr: - let errorMsg = "Unsubscribe failed:" & $error - error "UNSUBSCRIBE failed", error = errorMsg - return err(errorMsg) + error "UNSUBSCRIBE failed", error + return err($error) of PUBLISH: let msg = self.message.toWakuMessage() let pubsubTopic = $self.pubsubTopic (await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr: - let errorMsg = "Message not sent." & $error - error "PUBLISH failed", error = errorMsg - return err(errorMsg) + error "PUBLISH failed", error + return err($error) let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex return ok(msgHash) of NUM_CONNECTED_PEERS: let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr: - error "NUM_CONNECTED_PEERS failed", error = error + error "NUM_CONNECTED_PEERS failed", error return err($error) return ok($numConnPeers) of LIST_CONNECTED_PEERS: @@ -164,5 +161,5 @@ proc process*( @[protectedShard], uint16(self.clusterId) ) except ValueError: - return err("ADD_PROTECTED_SHARD exception: " & getCurrentExceptionMsg()) + return err(getCurrentExceptionMsg()) return ok("") From e321774e9196444fbdcbf7b4b89235c4666cb632 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Wed, 14 May 2025 23:38:26 +0200 Subject: [PATCH 44/48] properly pass userMessageLimit to OnchainGroupManager (#3407) --- waku/waku_rln_relay/rln_relay.nim | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index 20f5b7b24..5dae3bd51 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -414,9 +414,12 @@ proc generateRlnValidator*( proc monitorEpochs(wakuRlnRelay: WakuRLNRelay) {.async.} = while true: try: - waku_rln_remaining_proofs_per_epoch.set( - wakuRlnRelay.groupManager.userMessageLimit.get().float64 - ) + if wakuRlnRelay.groupManager.userMessageLimit.isSome(): + waku_rln_remaining_proofs_per_epoch.set( + wakuRlnRelay.groupManager.userMessageLimit.get().float64 + ) + else: + error "userMessageLimit is not set in monitorEpochs" except CatchableError: error "Error in epoch monitoring", error = getCurrentExceptionMsg() @@ -455,6 +458,7 @@ proc mount( (none(string), none(string)) groupManager = OnchainGroupManager( + userMessageLimit: some(conf.userMessageLimit), ethClientUrls: conf.ethClientUrls, ethContractAddress: $conf.ethContractAddress, chainId: conf.chainId, From 3aab1b83e485dbe8f15ccab7e21e83da050fe5a7 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Fri, 16 May 2025 12:51:49 +0200 Subject: [PATCH 45/48] Update Dockerfile rust image (#3413) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 84e457767..8a1a743c9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # BUILD NIM APP ---------------------------------------------------------------- -FROM rust:1.77.1-alpine3.18 AS nim-build +FROM rust:1.81.0-alpine3.19 AS nim-build ARG NIMFLAGS ARG MAKE_TARGET=wakunode2 From d5063e7d89852682b133bdea207a1b3517fb1b0c Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Wed, 21 May 2025 13:00:12 +0200 Subject: [PATCH 46/48] fix: enabling WebSocket connection also in case only websocket-secure-support enabled (#3417) * Enabling WebSocket connection also in 
case only websocket-secure-support is enabled --- waku/factory/conf_builder/web_socket_conf_builder.nim | 2 ++ 1 file changed, 2 insertions(+) diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim index b091e2d1e..25ff6461d 100644 --- a/waku/factory/conf_builder/web_socket_conf_builder.nim +++ b/waku/factory/conf_builder/web_socket_conf_builder.nim @@ -22,6 +22,8 @@ proc withEnabled*(b: var WebSocketConfBuilder, enabled: bool) = proc withSecureEnabled*(b: var WebSocketConfBuilder, secureEnabled: bool) = b.secureEnabled = some(secureEnabled) + if b.secureEnabled.get(): + b.enabled = some(true) # ws must be enabled to use wss proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: Port) = b.webSocketPort = some(webSocketPort) From 3bb40d48e38e5a3cb59022631f88dc2f65b6b658 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Wed, 21 May 2025 22:13:05 +0200 Subject: [PATCH 47/48] wakucanary maintenance (#3415) - Add more possible protocols to monitor - Simplify protocol validation simple algorithm - Properly pass the shard CLI parameter to the ENR info - Mount metadata protocol - Properly use of quit(QuitFailure) --- apps/wakucanary/wakucanary.nim | 56 +++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index eec889eac..3565c619f 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -1,5 +1,5 @@ import - std/[strutils, sequtils, tables], + std/[strutils, sequtils, tables, strformat], confutils, chronos, stew/shims/net, @@ -21,6 +21,14 @@ const ProtocolsTable = { "relay": "/vac/waku/relay/", "lightpush": "/vac/waku/lightpush/", "filter": "/vac/waku/filter-subscribe/2", + "filter-push": "/vac/waku/filter-push/", + "ipfs-id": "/ipfs/id/", + "autonat": "/libp2p/autonat/", + "circuit-relay": "/libp2p/circuit/relay/", + "metadata": "/vac/waku/metadata/", + "rendezvous": "/rendezvous/", + "ipfs-ping": "/ipfs/ping/", + "peer-exchange": "/vac/waku/peer-exchange/", }.toTable const WebSocketPortOffset = 1000 @@ -105,21 +113,30 @@ proc parseCmdArg*(T: type chronos.Duration, p: string): T = proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] = return @[] -# checks if rawProtocols (skipping version) are supported in nodeProtocols proc areProtocolsSupported( - rawProtocols: seq[string], nodeProtocols: seq[string] + toValidateProtocols: seq[string], nodeProtocols: seq[string] ): bool = + ## Checks if all toValidateProtocols are contained in nodeProtocols. + ## nodeProtocols contains the full list of protocols currently informed by the node under analysis. + ## toValidateProtocols contains the protocols, without version number, that we want to check if they are supported by the node. 
var numOfSupportedProt: int = 0 - for nodeProtocol in nodeProtocols: - for rawProtocol in rawProtocols: - let protocolTag = ProtocolsTable[rawProtocol] + for rawProtocol in toValidateProtocols: + let protocolTag = ProtocolsTable[rawProtocol] + debug "Checking if protocol is supported", expected_protocol_tag = protocolTag + + var protocolSupported = false + for nodeProtocol in nodeProtocols: if nodeProtocol.startsWith(protocolTag): - info "Supported protocol ok", expected = protocolTag, supported = nodeProtocol + info "The node supports the protocol", supported_protocol = nodeProtocol numOfSupportedProt += 1 + protocolSupported = true break - if numOfSupportedProt == rawProtocols.len: + if not protocolSupported: + error "The node does not support the protocol", expected_protocol = protocolTag + + if numOfSupportedProt == toValidateProtocols.len: return true return false @@ -167,7 +184,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let peerRes = parsePeerInfo(conf.address) if peerRes.isErr(): error "Couldn't parse 'conf.address'", error = peerRes.error - return 1 + quit(QuitFailure) let peer = peerRes.value @@ -202,6 +219,12 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.shards) + ).isOkOr: + error "could not initialize ENR with shards", error + quit(QuitFailure) + let recordRes = enrBuilder.build() let record = if recordRes.isErr(): @@ -217,7 +240,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = createDir(CertsDirectory) if generateSelfSignedCertificate(certPath, keyPath) != 0: error "Error generating key and certificate" - return 1 + quit(QuitFailure) builder.withRecord(record) builder.withNetworkConfiguration(netConfig.tryGet()) @@ -232,7 +255,11 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = await mountLibp2pPing(node) except CatchableError: error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg() - return 1 + quit(QuitFailure) + + node.mountMetadata(conf.clusterId).isOkOr: + error "failed to mount metadata protocol", error + quit(QuitFailure) await node.start() @@ -243,7 +270,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let timedOut = not await node.connectToNodes(@[peer]).withTimeout(conf.timeout) if timedOut: error "Timedout after", timeout = conf.timeout - return 1 + quit(QuitFailure) let lp2pPeerStore = node.switch.peerStore let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId] @@ -253,13 +280,14 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = if conStatus in [Connected, CanConnect]: let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId] + if not areProtocolsSupported(conf.protocols, nodeProtocols): error "Not all protocols are supported", expected = conf.protocols, supported = nodeProtocols - return 1 + quit(QuitFailure) elif conStatus == CannotConnect: error "Could not connect", peerId = peer.peerId - return 1 + quit(QuitFailure) return 0 when isMainModule: From e4a4313d8292a72864736f0e5d43e8b8fe239489 Mon Sep 17 00:00:00 2001 From: markoburcul Date: Fri, 23 May 2025 09:05:22 +0200 Subject: [PATCH 48/48] nix: package outputs of build in .aar file Add nix `result` folder to gitignore also. 
Referenced issue: * https://github.com/waku-org/nwaku/issues/3232 --- .gitignore | 3 +++ flake.nix | 1 + nix/default.nix | 9 +++++++-- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 69106b9df..7430c3e99 100644 --- a/.gitignore +++ b/.gitignore @@ -76,3 +76,6 @@ coverage_html_report/ .qmake.stash main-qt waku_handler.moc.cpp + +# Nix build result +result diff --git a/flake.nix b/flake.nix index 419c1d6f7..760f49337 100644 --- a/flake.nix +++ b/flake.nix @@ -51,6 +51,7 @@ src = self; targets = ["libwaku-android-arm64"]; androidArch = "aarch64-linux-android"; + abidir = "arm64-v8a"; zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64; }; default = libwaku-android-arm64; diff --git a/nix/default.nix b/nix/default.nix index a9d31b46d..29eec844d 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -10,6 +10,7 @@ "x86_64-linux" "aarch64-linux" ], androidArch, + abidir, zerokitPkg, }: @@ -32,6 +33,7 @@ in stdenv.mkDerivation rec { buildInputs = with pkgs; [ openssl gmp + zip ]; # Dependencies that should only exist in the build environment. @@ -62,6 +64,7 @@ in stdenv.mkDerivation rec { ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}"; NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; XDG_CACHE_HOME = "/tmp"; + androidManifest = ""; makeFlags = targets ++ [ "V=${toString verbosity}" @@ -98,8 +101,10 @@ in stdenv.mkDerivation rec { ''; installPhase = '' - mkdir -p $out/build/android - cp -r ./build/android/* $out/build/android/ + mkdir -p $out/jni + cp -r ./build/android/${abidir}/* $out/jni/ + echo '${androidManifest}' > $out/jni/AndroidManifest.xml + cd $out && zip -r libwaku.aar * ''; meta = with pkgs.lib; {