diff --git a/.github/ISSUE_TEMPLATE/bump_dependencies.md b/.github/ISSUE_TEMPLATE/bump_dependencies.md index 6c1e777a1..0413cbfd2 100644 --- a/.github/ISSUE_TEMPLATE/bump_dependencies.md +++ b/.github/ISSUE_TEMPLATE/bump_dependencies.md @@ -12,7 +12,6 @@ assignees: '' Update `nwaku` "vendor" dependencies. ### Items to bump -- [ ] negentropy - [ ] dnsclient.nim ( update to the latest tag version ) - [ ] nim-bearssl - [ ] nimbus-build-system @@ -38,12 +37,12 @@ Update `nwaku` "vendor" dependencies. - [ ] nim-sqlite3-abi ( update to the latest tag version ) - [ ] nim-stew - [ ] nim-stint -- [ ] nim-taskpools -- [ ] nim-testutils +- [ ] nim-taskpools ( update to the latest tag version ) +- [ ] nim-testutils ( update to the latest tag version ) - [ ] nim-toml-serialization - [ ] nim-unicodedb -- [ ] nim-unittest2 -- [ ] nim-web3 -- [ ] nim-websock +- [ ] nim-unittest2 ( update to the latest tag version ) +- [ ] nim-web3 ( update to the latest tag version ) +- [ ] nim-websock ( update to the latest tag version ) - [ ] nim-zlib -- [ ] zerokit ( this should be kept in version `v0.5.1` ) +- [ ] zerokit ( this should be kept in version `v0.7.0` ) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d6a313e3..30708ead9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -119,12 +119,13 @@ jobs: sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18 postgres_enabled=1 fi - + export MAKEFLAGS="-j1" export NIMFLAGS="--colors:off -d:chronicles_colors:none" export USE_LIBBACKTRACE=0 - make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2 + make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test + make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2 build-docker-image: needs: changes @@ -140,14 +141,12 @@ jobs: secrets: inherit - js-waku-node: needs: build-docker-image uses: waku-org/js-waku/.github/workflows/test-node.yml@master with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node - debug: waku* js-waku-node-optional: needs: build-docker-image @@ -155,7 +154,6 @@ jobs: with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node-optional - debug: waku* lint: name: "Lint" diff --git a/.github/workflows/windows-build.yml b/.github/workflows/windows-build.yml index 3ac6ce15d..52cd7f91a 100644 --- a/.github/workflows/windows-build.yml +++ b/.github/workflows/windows-build.yml @@ -68,28 +68,6 @@ jobs: ./build_all.bat cd ../../../.. 
- - name: Building libunwind - run: | - cd vendor/nim-libbacktrace - mkdir -p vendor/libunwind/build - pushd vendor/libunwind - - cmake -S runtimes \ - -DLLVM_ENABLE_RUNTIMES="libunwind" \ - -DLIBUNWIND_ENABLE_SHARED=OFF -DLIBUNWIND_ENABLE_STATIC=ON \ - -DLIBUNWIND_INCLUDE_DOCS=OFF -DLIBUNWIND_INSTALL_HEADERS=ON \ - -DCMAKE_INSTALL_PREFIX="$(pwd)/../install/usr" \ - -G "MinGW Makefiles" -B build - - cd build - mingw32-make VERBOSE=1 clean - mingw32-make VERBOSE=1 unwind_static - mingw32-make VERBOSE=1 install-unwind - - popd - mkdir -p install/usr/lib - cp -r vendor/libunwind/build/lib/libunwind.a install/usr/lib/ - - name: Building miniupnpc run: | cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc @@ -103,12 +81,13 @@ jobs: make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 cd ../../../../ - - name: Building wakunode2 + - name: Building wakunode2.exe run: | - cd vendor/nim-libbacktrace - cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib - cd ../.. make wakunode2 LOG_LEVEL=DEBUG V=3 -j8 + + - name: Building libwaku.dll + run: | + make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j - name: Check Executable run: | @@ -118,3 +97,9 @@ echo "Build failed: wakunode2.exe not found" exit 1 fi + if [ -f "./build/libwaku.dll" ]; then + echo "libwaku.dll build successful" + else + echo "Build failed: libwaku.dll not found" + exit 1 + fi diff --git a/CHANGELOG.md b/CHANGELOG.md index 151392f1b..c7b0369b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ **Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e., the one that is passed through this CLI: `--rln-relay-tree-path`. +### Features +* lightpush v3 ([#3279](https://github.com/waku-org/nwaku/pull/3279)) ([e0b563ff](https://github.com/waku-org/nwaku/commit/e0b563ffe5af20bd26d37cd9b4eb9ed9eb82ff80)) + Upgrade of the Waku Lightpush protocol with enhanced error handling. Read the specification [here](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) + This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): | Protocol | Spec status | Protocol id | | ---: | :---: | :--- | | [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | | [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | | [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` | | [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | | [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | diff --git a/Dockerfile b/Dockerfile index 8a1a743c9..b1f6b3c6a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -78,7 +78,7 @@ RUN make -j$(nproc) # Debug image -FROM prod AS debug +FROM prod AS debug-with-heaptrack RUN apk add --no-cache gdb libunwind diff --git a/Makefile b/Makefile index d15668673..e41b4207a 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,19 @@ endif # default target, because it's the first one that doesn't start with '.' all: | wakunode2 example2 chat2 chat2bridge libwaku -test: | testcommon testwaku +TEST_FILE := $(word 2,$(MAKECMDGOALS)) +TEST_NAME := $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) + +test: +ifeq ($(strip $(TEST_FILE)),) + $(MAKE) testcommon + $(MAKE) testwaku +else + $(MAKE) compile-test $(TEST_FILE) $(TEST_NAME) +endif +# this prevents make from erroring on unknown targets like "Index" +%: + @true waku.nims: ln -s waku.nimble $@ @@ -82,15 +94,18 @@ HEAPTRACKER ?= 0 HEAPTRACKER_INJECT ?= 0 ifeq ($(HEAPTRACKER), 1) # Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch -DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support -TARGET := heaptrack-build +DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support_v2.0.12 +TARGET := debug-with-heaptrack +NIM_COMMIT := heaptrack_support_v2.0.12 ifeq ($(HEAPTRACKER_INJECT), 1) # the Nim compiler will load 'libheaptrack_inject.so' HEAPTRACK_PARAMS := -d:heaptracker -d:heaptracker_inject +NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker -d:heaptracker_inject else # the Nim compiler will load 'libheaptrack_preload.so' HEAPTRACK_PARAMS := -d:heaptracker +NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker endif endif @@ -109,11 +124,8 @@ ifeq (, $(shell which cargo)) curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable endif -anvil: rustup -ifeq (, $(shell which anvil 2> /dev/null)) -# Install Anvil if it's not installed - ./scripts/install_anvil.sh -endif +rln-deps: rustup + ./scripts/install_rln_tests_dependencies.sh deps: | deps-common nat-libs waku.nims @@ -202,13 +214,14 @@ testcommon: | build deps ########## .PHONY: testwaku wakunode2 testwakunode2 example2 chat2 chat2bridge liteprotocoltester -# install anvil only for the testwaku target -testwaku: | build deps anvil librln +# install rln-deps only for the testwaku target +testwaku: | build deps rln-deps librln echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim test -d:os=$(shell uname) $(NIM_PARAMS) waku.nims wakunode2: | build deps librln echo -e $(BUILD_MSG) "build/$@" && \ + \ $(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims benchmarks: | build deps librln @@ -243,9 +256,10 @@ build/%: | build deps librln echo -e $(BUILD_MSG) "build/$*" && \ $(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $* -test/%: | build deps librln - echo -e $(BUILD_MSG) "test/$*" && \ - $(ENV_SCRIPT) nim testone 
$(NIM_PARAMS) waku.nims $* +compile-test: | build deps librln + echo -e $(BUILD_MSG) "$(TEST_FILE)" && \ + $(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \ + $(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "$(TEST_NAME)" ################ ## Waku tools ## @@ -343,12 +357,12 @@ docker-image: docker-quick-image: MAKE_TARGET ?= wakunode2 docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION) docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG) -docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm docker-quick-image: | build deps librln wakunode2 docker build \ --build-arg="MAKE_TARGET=$(MAKE_TARGET)" \ --tag $(DOCKER_IMAGE_NAME) \ - --file docker/binaries/Dockerfile.bn.amd64 \ + --target $(TARGET) \ + --file docker/binaries/Dockerfile.bn.local \ . docker-push: @@ -397,14 +411,16 @@ docker-liteprotocoltester-push: STATIC ?= 0 + libwaku: | build deps librln - rm -f build/libwaku* + rm -f build/libwaku* + ifeq ($(STATIC), 1) - echo -e $(BUILD_MSG) "build/$@.a" && \ - $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims + echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims +else ifeq ($(detected_OS),Windows) + echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims else - echo -e $(BUILD_MSG) "build/$@.so" && \ - $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims + echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims endif ##################### diff --git a/README.md b/README.md index 057d0b622..119c00052 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ pacman -S --noconfirm --needed mingw-w64-x86_64-python #### 3. Build Wakunode - Open Git Bash as administrator - clone nwaku and cd nwaku -- Execute: `./scripts/build_wakunode_windows.sh` +- Execute: `./scripts/build_windows.sh` #### 4. 
Troubleshooting If `wakunode2.exe` isn't generated: diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index 127a761c0..d18d35674 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -11,7 +11,6 @@ import confutils, chronicles, chronos, - stew/shims/net as stewNet, eth/keys, bearssl, stew/[byteutils, results], @@ -381,7 +380,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.relay: let shards = conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it))) - (await node.mountRelay(shards)).isOkOr: + (await node.mountRelay()).isOkOr: echo "failed to mount relay: " & error return @@ -536,7 +535,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = chat.printReceivedMessage(msg) node.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler)) + (kind: PubsubSub, topic: DefaultPubsubTopic), WakuRelayHandler(handler) ).isOkOr: error "failed to subscribe to pubsub topic", topic = DefaultPubsubTopic, error = error @@ -559,7 +558,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = let rlnConf = WakuRlnConfig( dynamic: conf.rlnRelayDynamic, credIndex: conf.rlnRelayCredIndex, - chainId: conf.rlnRelayChainId, + chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()), ethClientUrls: conf.ethClientUrls.mapIt(string(it)), creds: some( RlnRelayCreds( @@ -591,9 +590,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = await chat.readWriteLoop() - if conf.keepAlive: - node.startKeepalive() - runForever() proc main(rng: ref HmacDrbgContext) {.async.} = diff --git a/apps/chat2bridge/chat2bridge.nim b/apps/chat2bridge/chat2bridge.nim index 7a7a5d08f..c2bf9c032 100644 --- a/apps/chat2bridge/chat2bridge.nim +++ b/apps/chat2bridge/chat2bridge.nim @@ -23,6 +23,7 @@ import waku_store, factory/builder, common/utils/matterbridge_client, + common/rate_limit/setting, ], # Chat 2 imports ../chat2/chat2, @@ -232,7 +233,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} = except: error "exception in relayHandler: " & getCurrentExceptionMsg() - cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error return diff --git a/apps/liteprotocoltester/legacy_publisher.nim b/apps/liteprotocoltester/legacy_publisher.nim new file mode 100644 index 000000000..12733ad2d --- /dev/null +++ b/apps/liteprotocoltester/legacy_publisher.nim @@ -0,0 +1,24 @@ +import chronos, results, options +import waku/[waku_node, waku_core] +import publisher_base + +type LegacyPublisher* = ref object of PublisherBase + +proc new*(T: type LegacyPublisher, wakuNode: WakuNode): T = + if isNil(wakuNode.wakuLegacyLightpushClient): + wakuNode.mountLegacyLightPushClient() + + return LegacyPublisher(wakuNode: wakuNode) + +method send*( + self: LegacyPublisher, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.async.} = + # On error, the original error description must be returned, because that text is used to distinguish between error types in metrics.
+ discard ( + await self.wakuNode.legacyLightpushPublish(some(topic), message, servicePeer) + ).valueOr: + return err(error) + return ok() diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index 58f6bd2e3..939332cff 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -14,13 +14,11 @@ import waku/[ common/enr, common/logging, - factory/waku, + factory/waku as waku_factory, factory/external_config, waku_node, - node/health_monitor, node/waku_metrics, node/peer_manager, - waku_api/rest/builder as rest_server_builder, waku_lightpush/common, waku_filter_v2, waku_peer_exchange/protocol, @@ -28,8 +26,8 @@ import waku_core/multiaddrstr, ], ./tester_config, - ./lightpush_publisher, - ./filter_subscriber, + ./publisher, + ./receiver, ./diagnose_connections, ./service_peer_management @@ -49,7 +47,7 @@ when isMainModule: ## 5. Start monitoring tools and external interfaces ## 6. Setup graceful shutdown hooks - const versionString = "version / git commit hash: " & waku.git_version + const versionString = "version / git commit hash: " & waku_factory.git_version let confRes = LiteProtocolTesterConf.load(version = versionString) if confRes.isErr(): @@ -61,7 +59,7 @@ when isMainModule: ## Logging setup logging.setupLog(conf.logLevel, conf.logFormat) - info "Running Lite Protocol Tester node", version = waku.git_version + info "Running Lite Protocol Tester node", version = waku_factory.git_version logConfig(conf) ##Prepare Waku configuration @@ -69,13 +67,13 @@ when isMainModule: ## - override according to tester functionality ## - var wakuConf: WakuNodeConf + var wakuNodeConf: WakuNodeConf if conf.configFile.isSome(): try: var configFile {.threadvar.}: InputFile configFile = conf.configFile.get() - wakuConf = WakuNodeConf.load( + wakuNodeConf = WakuNodeConf.load( version = versionString, printUsage = false, secondarySources = proc( @@ -88,82 +86,54 @@ when isMainModule: error "Loading Waku configuration failed", error = getCurrentExceptionMsg() quit(QuitFailure) - wakuConf.logLevel = conf.logLevel - wakuConf.logFormat = conf.logFormat - wakuConf.nat = conf.nat - wakuConf.maxConnections = 500 - wakuConf.restAddress = conf.restAddress - wakuConf.restPort = conf.restPort - wakuConf.restAllowOrigin = conf.restAllowOrigin + wakuNodeConf.logLevel = conf.logLevel + wakuNodeConf.logFormat = conf.logFormat + wakuNodeConf.nat = conf.nat + wakuNodeConf.maxConnections = 500 + wakuNodeConf.restAddress = conf.restAddress + wakuNodeConf.restPort = conf.restPort + wakuNodeConf.restAllowOrigin = conf.restAllowOrigin - wakuConf.dnsAddrs = true - wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")] + wakuNodeConf.dnsAddrsNameServers = + @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")] - wakuConf.shards = @[conf.shard] - wakuConf.contentTopics = conf.contentTopics - wakuConf.clusterId = conf.clusterId + wakuNodeConf.shards = @[conf.shard] + wakuNodeConf.contentTopics = conf.contentTopics + wakuNodeConf.clusterId = conf.clusterId ## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc... 
- wakuConf.metricsServer = true - wakuConf.metricsServerAddress = parseIpAddress("0.0.0.0") - wakuConf.metricsServerPort = conf.metricsPort + wakuNodeConf.metricsServer = true + wakuNodeConf.metricsServerAddress = parseIpAddress("0.0.0.0") + wakuNodeConf.metricsServerPort = conf.metricsPort # If bootstrap option is chosen we expect our clients will not mounted # so we will mount PeerExchange manually to gather possible service peers, # if got some we will mount the client protocols afterward. - wakuConf.peerExchange = false - wakuConf.relay = false - wakuConf.filter = false - wakuConf.lightpush = false - wakuConf.store = false + wakuNodeConf.peerExchange = false + wakuNodeConf.relay = false + wakuNodeConf.filter = false + wakuNodeConf.lightpush = false + wakuNodeConf.store = false - wakuConf.rest = false - wakuConf.relayServiceRatio = "40:60" + wakuNodeConf.rest = false + wakuNodeConf.relayServiceRatio = "40:60" - # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it - # It will always be called from main thread anyway. - # Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety - var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor - nodeHealthMonitor = WakuNodeHealthMonitor() - nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING) - - let restServer = rest_server_builder.startRestServerEssentials( - nodeHealthMonitor, wakuConf - ).valueOr: - error "Starting esential REST server failed.", error = $error + let wakuConf = wakuNodeConf.toWakuConf().valueOr: + error "Issue converting toWakuConf", error = $error quit(QuitFailure) - var wakuApp = Waku.new(wakuConf).valueOr: + var waku = Waku.new(wakuConf).valueOr: error "Waku initialization failed", error = error quit(QuitFailure) - wakuApp.restServer = restServer - - nodeHealthMonitor.setNode(wakuApp.node) - - (waitFor startWaku(addr wakuApp)).isOkOr: + (waitFor startWaku(addr waku)).isOkOr: error "Starting waku failed", error = error quit(QuitFailure) - rest_server_builder.startRestServerProtocolSupport( - restServer, wakuApp.node, wakuApp.wakuDiscv5, wakuConf - ).isOkOr: - error "Starting protocols support REST server failed.", error = $error - quit(QuitFailure) - - wakuApp.metricsServer = waku_metrics.startMetricsServerAndLogging(wakuConf).valueOr: - error "Starting monitoring and external interfaces failed", error = error - quit(QuitFailure) - - nodeHealthMonitor.setOverallHealth(HealthStatus.READY) - debug "Setting up shutdown hooks" - ## Setup shutdown hooks for this process. - ## Stop node gracefully on shutdown. 
- proc asyncStopper(wakuApp: Waku) {.async: (raises: [Exception]).} = - nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) - await wakuApp.stop() + proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = + await waku.stop() quit(QuitSuccess) # Handle Ctrl-C SIGINT @@ -172,7 +142,7 @@ when isMainModule: # workaround for https://github.com/nim-lang/Nim/issues/4057 setupForeignThreadGc() notice "Shutting down after receiving SIGINT" - asyncSpawn asyncStopper(wakuApp) + asyncSpawn asyncStopper(waku) setControlCHook(handleCtrlC) @@ -180,7 +150,7 @@ when isMainModule: when defined(posix): proc handleSigterm(signal: cint) {.noconv.} = notice "Shutting down after receiving SIGTERM" - asyncSpawn asyncStopper(wakuApp) + asyncSpawn asyncStopper(waku) c_signal(ansi_c.SIGTERM, handleSigterm) @@ -193,7 +163,7 @@ when isMainModule: # Not available in -d:release mode writeStackTrace() - waitFor wakuApp.stop() + waitFor waku.stop() quit(QuitFailure) c_signal(ansi_c.SIGSEGV, handleSigsegv) @@ -212,7 +182,7 @@ when isMainModule: if conf.serviceNode.len == 0: if conf.bootstrapNode.len > 0: info "Bootstrapping with PeerExchange to gather random service node" - let futForServiceNode = pxLookupServiceNode(wakuApp.node, conf) + let futForServiceNode = pxLookupServiceNode(waku.node, conf) if not (waitFor futForServiceNode.withTimeout(20.minutes)): error "Service node not found in time via PX" quit(QuitFailure) @@ -222,7 +192,7 @@ when isMainModule: quit(QuitFailure) serviceNodePeerInfo = selectRandomServicePeer( - wakuApp.node.peerManager, none(RemotePeerInfo), codec + waku.node.peerManager, none(RemotePeerInfo), codec ).valueOr: error "Service node selection failed" quit(QuitFailure) @@ -237,11 +207,11 @@ when isMainModule: info "Service node to be used", serviceNode = $serviceNodePeerInfo - logSelfPeers(wakuApp.node.peerManager) + logSelfPeers(waku.node.peerManager) if conf.testFunc == TesterFunctionality.SENDER: - setupAndPublish(wakuApp.node, conf, serviceNodePeerInfo) + setupAndPublish(waku.node, conf, serviceNodePeerInfo) else: - setupAndSubscribe(wakuApp.node, conf, serviceNodePeerInfo) + setupAndListen(waku.node, conf, serviceNodePeerInfo) runForever() diff --git a/apps/liteprotocoltester/lpt_supervisor.py b/apps/liteprotocoltester/lpt_supervisor.py index 24c395b0a..7d882afd2 100755 --- a/apps/liteprotocoltester/lpt_supervisor.py +++ b/apps/liteprotocoltester/lpt_supervisor.py @@ -24,8 +24,8 @@ def run_tester_node(predefined_test_env): return os.system(script_cmd) if __name__ == "__main__": - if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER"]: - print("Error: First argument must be either 'RECEIVER' or 'SENDER'") + if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER", "SENDERV3"]: + print("Error: First argument must be either 'RECEIVER' or 'SENDER' or 'SENDERV3'") sys.exit(1) predefined_test_env_file = '/usr/bin/infra.env' diff --git a/apps/liteprotocoltester/lightpush_publisher.nim b/apps/liteprotocoltester/publisher.nim similarity index 92% rename from apps/liteprotocoltester/lightpush_publisher.nim rename to apps/liteprotocoltester/publisher.nim index d79e68590..d8031473d 100644 --- a/apps/liteprotocoltester/lightpush_publisher.nim +++ b/apps/liteprotocoltester/publisher.nim @@ -21,14 +21,17 @@ import ./tester_message, ./lpt_metrics, ./diagnose_connections, - ./service_peer_management + ./service_peer_management, + ./publisher_base, + ./legacy_publisher, + ./v3_publisher randomize() type SizeRange* = tuple[min: uint64, max: uint64] -var RANDOM_PALYLOAD 
{.threadvar.}: seq[byte] -RANDOM_PALYLOAD = urandom(1024 * 1024) +var RANDOM_PAYLOAD {.threadvar.}: seq[byte] +RANDOM_PAYLOAD = urandom(1024 * 1024) # 1MiB of random payload to be used to extend message proc prepareMessage( @@ -59,9 +62,8 @@ if renderSize < len(contentPayload).uint64: renderSize = len(contentPayload).uint64 - let finalPayload = concat( - contentPayload, RANDOM_PALYLOAD[0 .. renderSize - len(contentPayload).uint64] - ) + let finalPayload = + concat(contentPayload, RANDOM_PAYLOAD[0 .. renderSize - len(contentPayload).uint64]) let message = WakuMessage( payload: finalPayload, # content of the message contentTopic: contentTopic, # content topic to publish to @@ -108,6 +110,7 @@ proc reportSentMessages() = proc publishMessages( wakuNode: WakuNode, + publisher: PublisherBase, servicePeer: RemotePeerInfo, lightpushPubsubTopic: PubsubTopic, lightpushContentTopic: ContentTopic, @@ -148,9 +151,7 @@ let publishStartTime = Moment.now() - let wlpRes = await wakuNode.legacyLightpushPublish( - some(lightpushPubsubTopic), message, actualServicePeer - ) + let wlpRes = await publisher.send(lightpushPubsubTopic, message, actualServicePeer) let publishDuration = Moment.now() - publishStartTime @@ -213,10 +214,13 @@ proc setupAndPublish*( wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo ) = - if isNil(wakuNode.wakuLightpushClient): - # if we have not yet initialized lightpush client, then do it as the only way we can get here is - # by having a service peer discovered. - wakuNode.mountLegacyLightPushClient() + var publisher: PublisherBase + if conf.lightpushVersion == LightpushVersion.LEGACY: + info "Using legacy lightpush protocol for publishing messages" + publisher = LegacyPublisher.new(wakuNode) + else: + info "Using lightpush v3 protocol for publishing messages" + publisher = V3Publisher.new(wakuNode) # give some time to receiver side to set up let waitTillStartTesting = conf.startPublishingAfter.seconds @@ -257,6 +261,7 @@ # Start maintaining subscription asyncSpawn publishMessages( wakuNode, + publisher, servicePeer, conf.getPubsubTopic(), conf.contentTopics[0], diff --git a/apps/liteprotocoltester/publisher_base.nim b/apps/liteprotocoltester/publisher_base.nim new file mode 100644 index 000000000..de88d82f8 --- /dev/null +++ b/apps/liteprotocoltester/publisher_base.nim @@ -0,0 +1,14 @@ +import chronos, results +import waku/[waku_node, waku_core] + +type PublisherBase* = ref object of RootObj + wakuNode*: WakuNode + +method send*( + self: PublisherBase, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.base, async.} = + discard + # On error, the original error description must be returned, because that text is used to distinguish between error types in metrics.
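
The new publisher_base.nim above defines the abstraction that legacy_publisher.nim and v3_publisher.nim build on: a `PublisherBase` that holds the `WakuNode` plus a base `send` method that concrete publishers override. As a rough sketch of how a further variant could be plugged into the tester (illustrative only, not part of this diff; `DroppingPublisher` is a hypothetical name):

```nim
# Sketch only: a hypothetical no-op publisher following the same pattern
# as LegacyPublisher and V3Publisher introduced in this change.
import chronos, results
import waku/[waku_node, waku_core]
import publisher_base

type DroppingPublisher* = ref object of PublisherBase

proc new*(T: type DroppingPublisher, wakuNode: WakuNode): T =
  # Nothing to mount for this variant; real publishers mount their client protocol here.
  return DroppingPublisher(wakuNode: wakuNode)

method send*(
    self: DroppingPublisher,
    topic: PubsubTopic,
    message: WakuMessage,
    servicePeer: RemotePeerInfo,
): Future[Result[void, string]] {.async.} =
  # Drops the message; only useful for dry-running the publisher loop.
  return ok()
```

setupAndPublish in publisher.nim would then select such a variant the same way it chooses between LegacyPublisher and V3Publisher based on `--lightpush-version`.
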
diff --git a/apps/liteprotocoltester/filter_subscriber.nim b/apps/liteprotocoltester/receiver.nim similarity index 99% rename from apps/liteprotocoltester/filter_subscriber.nim rename to apps/liteprotocoltester/receiver.nim index fbb11c92e..f0f41b1c5 100644 --- a/apps/liteprotocoltester/filter_subscriber.nim +++ b/apps/liteprotocoltester/receiver.nim @@ -116,7 +116,7 @@ proc maintainSubscription( await sleepAsync(30.seconds) # Subscription maintenance interval -proc setupAndSubscribe*( +proc setupAndListen*( wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo ) = if isNil(wakuNode.wakuFilterClient): diff --git a/apps/liteprotocoltester/run_tester_node.sh b/apps/liteprotocoltester/run_tester_node.sh index 4a80ca460..3c2d60e2f 100755 --- a/apps/liteprotocoltester/run_tester_node.sh +++ b/apps/liteprotocoltester/run_tester_node.sh @@ -25,7 +25,12 @@ fi FUNCTION=$2 if [ "${FUNCTION}" = "SENDER" ]; then - FUNCTION=--test-func=SENDER + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERVICENAME=lightpush-service +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" SERVICENAME=lightpush-service fi diff --git a/apps/liteprotocoltester/run_tester_node_at_infra.sh b/apps/liteprotocoltester/run_tester_node_at_infra.sh index e926875aa..db26eb091 100644 --- a/apps/liteprotocoltester/run_tester_node_at_infra.sh +++ b/apps/liteprotocoltester/run_tester_node_at_infra.sh @@ -26,7 +26,15 @@ fi FUNCTION=$2 if [ "${FUNCTION}" = "SENDER" ]; then - FUNCTION=--test-func=SENDER + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} diff --git a/apps/liteprotocoltester/run_tester_node_on_fleet.sh b/apps/liteprotocoltester/run_tester_node_on_fleet.sh index 538a890e6..533f5b1bf 100644 --- a/apps/liteprotocoltester/run_tester_node_on_fleet.sh +++ b/apps/liteprotocoltester/run_tester_node_on_fleet.sh @@ -26,7 +26,15 @@ fi FUNCTION=$2 if [ "${FUNCTION}" = "SENDER" ]; then - FUNCTION=--test-func=SENDER + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} diff --git a/apps/liteprotocoltester/service_peer_management.nim b/apps/liteprotocoltester/service_peer_management.nim index a303c3c58..7d79e0f36 100644 --- a/apps/liteprotocoltester/service_peer_management.nim +++ b/apps/liteprotocoltester/service_peer_management.nim @@ -158,9 +158,7 @@ proc 
tryCallAllPxPeers*( proc pxLookupServiceNode*( node: WakuNode, conf: LiteProtocolTesterConf ): Future[Result[bool, void]] {.async.} = - var codec: string = WakuLightPushCodec - if conf.testFunc == TesterFunctionality.RECEIVER: - codec = WakuFilterSubscribeCodec + let codec: string = conf.getCodec() if node.wakuPeerExchange.isNil(): let peerExchangeNode = translateToRemotePeerInfo(conf.bootstrapNode).valueOr: diff --git a/apps/liteprotocoltester/tester_config.nim b/apps/liteprotocoltester/tester_config.nim index eccaafc06..c06a970b1 100644 --- a/apps/liteprotocoltester/tester_config.nim +++ b/apps/liteprotocoltester/tester_config.nim @@ -33,6 +33,10 @@ type TesterFunctionality* = enum SENDER # pumps messages to the network RECEIVER # gather and analyze messages from the network +type LightpushVersion* = enum + LEGACY # legacy lightpush protocol + V3 # lightpush v3 protocol + type LiteProtocolTesterConf* = object configFile* {. desc: @@ -80,6 +84,12 @@ name: "test-func" .}: TesterFunctionality + lightpushVersion* {. + desc: "Version of the sender to use. Supported values: legacy, v3.", + defaultValue: LightpushVersion.LEGACY, + name: "lightpush-version" + .}: LightpushVersion + numMessages* {. desc: "Number of messages to send.", defaultValue: 120, name: "num-messages" .}: uint32 @@ -190,4 +200,14 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] = proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic = return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard) +proc getCodec*(conf: LiteProtocolTesterConf): string = + return + if conf.testFunc == TesterFunctionality.RECEIVER: + WakuFilterSubscribeCodec + else: + if conf.lightpushVersion == LightpushVersion.LEGACY: + WakuLegacyLightPushCodec + else: + WakuLightPushCodec + {.pop.} diff --git a/apps/liteprotocoltester/v3_publisher.nim b/apps/liteprotocoltester/v3_publisher.nim new file mode 100644 index 000000000..74a3fdd05 --- /dev/null +++ b/apps/liteprotocoltester/v3_publisher.nim @@ -0,0 +1,29 @@ +import results, options, chronos +import waku/[waku_node, waku_core, waku_lightpush] +import publisher_base + +type V3Publisher* = ref object of PublisherBase + +proc new*(T: type V3Publisher, wakuNode: WakuNode): T = + if isNil(wakuNode.wakuLightpushClient): + wakuNode.mountLightPushClient() + + return V3Publisher(wakuNode: wakuNode) + +method send*( + self: V3Publisher, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.async.} = + # On error, the original error description must be returned, because that text is used to distinguish between error types in metrics.
+ discard ( + await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer)) + ).valueOr: + if error.code == NO_PEERS_TO_RELAY and + error.desc != some("No peers for topic, skipping publish"): + # TODO: We need better separation of errors happening on the client side or the server side.- + return err("dial_failure") + else: + return err($error.code) + return ok() diff --git a/apps/networkmonitor/networkmonitor.nim b/apps/networkmonitor/networkmonitor.nim index 7b71a630e..f391b3d20 100644 --- a/apps/networkmonitor/networkmonitor.nim +++ b/apps/networkmonitor/networkmonitor.nim @@ -554,7 +554,7 @@ proc subscribeAndHandleMessages( else: msgPerContentTopic[msg.contentTopic] = 1 - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr: + node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr: error "failed to subscribe to pubsub topic", pubsubTopic, error quit(1) @@ -570,17 +570,18 @@ when isMainModule: info "cli flags", conf = conf if conf.clusterId == 1: - let twnClusterConf = ClusterConf.TheWakuNetworkConf() + let twnNetworkConf = NetworkConf.TheWakuNetworkConf() - conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes - conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic - conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress - conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec - conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit - conf.numShardsInNetwork = twnClusterConf.numShardsInNetwork + conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes + conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic + conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress + conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec + conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit + conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster if conf.shards.len == 0: - conf.shards = toSeq(uint16(0) .. uint16(twnClusterConf.numShardsInNetwork - 1)) + conf.shards = + toSeq(uint16(0) .. 
uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1)) if conf.logLevel != LogLevel.NONE: setLogLevel(conf.logLevel) diff --git a/apps/networkmonitor/networkmonitor_config.nim b/apps/networkmonitor/networkmonitor_config.nim index 04245f9dd..8f5298a53 100644 --- a/apps/networkmonitor/networkmonitor_config.nim +++ b/apps/networkmonitor/networkmonitor_config.nim @@ -5,7 +5,6 @@ import chronos, std/strutils, results, - stew/shims/net, regex type EthRpcUrl* = distinct string diff --git a/apps/networkmonitor/networkmonitor_utils.nim b/apps/networkmonitor/networkmonitor_utils.nim index f12b16014..25b79da65 100644 --- a/apps/networkmonitor/networkmonitor_utils.nim +++ b/apps/networkmonitor/networkmonitor_utils.nim @@ -3,7 +3,6 @@ import std/json, results, - stew/shims/net, chronicles, chronicles/topics_registry, chronos, diff --git a/apps/sonda/docker-compose.yml b/apps/sonda/docker-compose.yml index c6235ef32..d6594428e 100644 --- a/apps/sonda/docker-compose.yml +++ b/apps/sonda/docker-compose.yml @@ -9,7 +9,7 @@ x-logging: &logging x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-" x-rln-environment: &rln_env - RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8} + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6} RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" diff --git a/apps/sonda/register_rln.sh b/apps/sonda/register_rln.sh index aca1007a8..4fb373b3a 100755 --- a/apps/sonda/register_rln.sh +++ b/apps/sonda/register_rln.sh @@ -24,7 +24,7 @@ fi docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \ --rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \ --rln-relay-eth-private-key=${ETH_TESTNET_KEY} \ ---rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \ +--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \ --rln-relay-cred-path=/keystore/keystore.json \ --rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \ --rln-relay-user-message-limit=20 \ diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index 3565c619f..84ac6350c 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -2,7 +2,6 @@ import std/[strutils, sequtils, tables, strformat], confutils, chronos, - stew/shims/net, chronicles/topics_registry, os import diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim index a99cfcb52..5e6cbb700 100644 --- a/apps/wakunode2/wakunode2.nim +++ b/apps/wakunode2/wakunode2.nim @@ -16,7 +16,6 @@ import factory/external_config, factory/waku, node/health_monitor, - node/waku_metrics, waku_api/rest/builder as rest_server_builder, ] @@ -53,69 +52,21 @@ when isMainModule: let conf = wakuNodeConf.toInspectRlnDbConf() doInspectRlnDb(conf) of noCommand: - # NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it - # It will always be called from main thread anyway. 
- # Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety - var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor - nodeHealthMonitor = WakuNodeHealthMonitor() - nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING) - let conf = wakuNodeConf.toWakuConf().valueOr: error "Waku configuration failed", error = error quit(QuitFailure) - var restServer: WakuRestServerRef = nil - - if conf.restServerConf.isSome(): - restServer = rest_server_builder.startRestServerEssentials( - nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift - ).valueOr: - error "Starting essential REST server failed.", error = $error - quit(QuitFailure) - var waku = Waku.new(conf).valueOr: error "Waku initialization failed", error = error quit(QuitFailure) - waku.restServer = restServer - - nodeHealthMonitor.setNode(waku.node) - (waitFor startWaku(addr waku)).isOkOr: error "Starting waku failed", error = error quit(QuitFailure) - if conf.restServerConf.isSome(): - rest_server_builder.startRestServerProtocolSupport( - restServer, - waku.node, - waku.wakuDiscv5, - conf.restServerConf.get(), - conf.relay, - conf.lightPush, - conf.clusterId, - conf.shards, - conf.contentTopics, - ).isOkOr: - error "Starting protocols support REST server failed.", error = $error - quit(QuitFailure) - - if conf.metricsServerConf.isSome(): - waku.metricsServer = waku_metrics.startMetricsServerAndLogging( - conf.metricsServerConf.get(), conf.portsShift - ).valueOr: - error "Starting monitoring and external interfaces failed", error = error - quit(QuitFailure) - - nodeHealthMonitor.setOverallHealth(HealthStatus.READY) - debug "Setting up shutdown hooks" - ## Setup shutdown hooks for this process. - ## Stop node gracefully on shutdown. - - proc asyncStopper(node: Waku) {.async: (raises: [Exception]).} = - nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) - await node.stop() + proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = + await waku.stop() quit(QuitSuccess) # Handle Ctrl-C SIGINT diff --git a/ci/Jenkinsfile.release b/ci/Jenkinsfile.release index 1a2125402..5d18d32aa 100644 --- a/ci/Jenkinsfile.release +++ b/ci/Jenkinsfile.release @@ -69,17 +69,33 @@ pipeline { stages { stage('Build') { steps { script { - image = docker.build( - "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", - "--label=build='${env.BUILD_URL}' " + - "--label=commit='${git.commit()}' " + - "--label=version='${git.describe('--tags')}' " + - "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " + - "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " + - "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + - "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + - "--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ." - ) + if (params.HEAPTRACK) { + echo 'Building with heaptrack support' + image = docker.build( + "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", + "--label=build='${env.BUILD_URL}' " + + "--label=commit='${git.commit()}' " + + "--label=version='${git.describe('--tags')}' " + + "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " + + "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres -d:heaptracker ' " + + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + + "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + + "--build-arg=NIM_COMMIT='NIM_COMMIT=heaptrack_support_v2.0.12' " + + "--target='debug-with-heaptrack' ." 
+ ) + } else { + image = docker.build( + "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", + "--label=build='${env.BUILD_URL}' " + + "--label=commit='${git.commit()}' " + + "--label=version='${git.describe('--tags')}' " + + "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " + + "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " + + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + + "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + + "--target='prod' ." + ) + } } } } diff --git a/docker/binaries/Dockerfile.bn.amd64 b/docker/binaries/Dockerfile.bn.amd64 index d32cf9342..c8dc0ffeb 100644 --- a/docker/binaries/Dockerfile.bn.amd64 +++ b/docker/binaries/Dockerfile.bn.amd64 @@ -13,7 +13,7 @@ EXPOSE 30303 60000 8545 # Referenced in the binary RUN apt-get update &&\ - apt-get install -y libpcre3 libpq-dev curl iproute2 wget &&\ + apt-get install -y libpcre3 libpq-dev curl iproute2 wget dnsutils &&\ apt-get clean && rm -rf /var/lib/apt/lists/* # Fix for 'Error loading shared library libpcre.so.3: No such file or directory' diff --git a/docker/binaries/Dockerfile.bn.local b/docker/binaries/Dockerfile.bn.local new file mode 100644 index 000000000..79445d14f --- /dev/null +++ b/docker/binaries/Dockerfile.bn.local @@ -0,0 +1,63 @@ +# Dockerfile to build a distributable container image from pre-existing binaries +# FROM debian:stable-slim AS prod +FROM ubuntu:24.04 AS prod + +ARG MAKE_TARGET=wakunode2 + +LABEL maintainer="vaclav@status.im" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Wakunode: Waku client" +LABEL commit="unknown" + +# DevP2P, LibP2P, and JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apt-get update &&\ + apt-get install -y libpcre3 libpq-dev curl iproute2 wget jq dnsutils &&\ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Fix for 'Error loading shared library libpcre.so.3: No such file or directory' +RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3 + +# Copy to a separate location to accommodate different MAKE_TARGET values +ADD ./build/$MAKE_TARGET /usr/local/bin/ + +# Copy migration scripts for DB upgrades +ADD ./migrations/ /app/migrations/ + +# Symlink the correct wakunode binary +RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode + +ENTRYPOINT ["/usr/bin/wakunode"] + +# By default just show help if called without arguments +CMD ["--help"] + +# Build debug tools: heaptrack +FROM ubuntu:24.04 AS heaptrack-build + +RUN apt update +RUN apt install -y gdb git g++ make cmake zlib1g-dev libboost-all-dev libunwind-dev +RUN git clone https://github.com/KDE/heaptrack.git /heaptrack + +WORKDIR /heaptrack/build +# Pin to a commit that builds properly. We will revisit this for new releases +RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca +RUN cmake -DCMAKE_BUILD_TYPE=Release ..
+RUN make -j$(nproc) + + +# Debug image +FROM prod AS debug-with-heaptrack + +RUN apt update +RUN apt install -y gdb libunwind8 + +# Add heaptrack +COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/ + +ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/ +RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack + +ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"] diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c index b80b9af8f..1f6f0256a 100644 --- a/examples/cbindings/waku_example.c +++ b/examples/cbindings/waku_example.c @@ -305,10 +305,10 @@ int main(int argc, char** argv) { \"storeMessageDbUrl\": \"%s\", \ \"storeMessageRetentionPolicy\": \"%s\", \ \"storeMaxNumDbConnections\": %d , \ - \"logLevel\": \"FATAL\", \ + \"logLevel\": \"DEBUG\", \ \"discv5Discovery\": true, \ \"discv5BootstrapNodes\": \ - [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \ + [\"enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuEAoShWGyN66wwusE3Ri8hXBaIkoHZHybUB8cCPv5v3ypEf9OCg4cfslJxZFANl90s-jmMOugLUyBx4EfOBNJ6_VAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\"], \ \"discv5UdpPort\": 9999, \ \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ @@ -353,6 +353,11 @@ int main(int argc, char** argv) { show_main_menu(); while(1) { handle_user_input(); + + // Uncomment the following if need to test the metrics retrieval + // WAKU_CALL( waku_get_metrics(ctx, + // event_handler, + // userData) ); } pthread_mutex_destroy(&mutex); diff --git a/examples/filter_subscriber.nim b/examples/filter_subscriber.nim index 2216e4a41..5554966d4 100644 --- a/examples/filter_subscriber.nim +++ b/examples/filter_subscriber.nim @@ -1,7 +1,6 @@ import std/[tables, 
sequtils], stew/byteutils, - stew/shims/net, chronicles, chronos, confutils, diff --git a/examples/lightpush_publisher.nim b/examples/lightpush_publisher.nim index b0f919a89..9c7499695 100644 --- a/examples/lightpush_publisher.nim +++ b/examples/lightpush_publisher.nim @@ -1,7 +1,6 @@ import std/[tables, times, sequtils], stew/byteutils, - stew/shims/net, chronicles, results, chronos, diff --git a/examples/publisher.nim b/examples/publisher.nim index 907ce2274..8c2d03679 100644 --- a/examples/publisher.nim +++ b/examples/publisher.nim @@ -1,7 +1,6 @@ import std/[tables, times, sequtils], stew/byteutils, - stew/shims/net, chronicles, chronos, confutils, diff --git a/examples/subscriber.nim b/examples/subscriber.nim index 633bfa4ca..fb040b05a 100644 --- a/examples/subscriber.nim +++ b/examples/subscriber.nim @@ -1,7 +1,6 @@ import std/[tables, sequtils], stew/byteutils, - stew/shims/net, chronicles, chronos, confutils, @@ -120,7 +119,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = contentTopic = msg.contentTopic, timestamp = msg.timestamp - node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr: + node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr: error "failed to subscribe to pubsub topic", pubsubTopic, error quit(1) diff --git a/examples/wakustealthcommitments/node_spec.nim b/examples/wakustealthcommitments/node_spec.nim index b5dafb0be..c3468ccde 100644 --- a/examples/wakustealthcommitments/node_spec.nim +++ b/examples/wakustealthcommitments/node_spec.nim @@ -3,7 +3,6 @@ import waku/[common/logging, factory/[waku, networks_config, external_config]] import std/[options, strutils, os, sequtils], - stew/shims/net as stewNet, chronicles, chronos, metrics, @@ -25,26 +24,26 @@ proc setup*(): Waku = var conf = confRes.get() - let twnClusterConf = ClusterConf.TheWakuNetworkConf() + let twnNetworkConf = NetworkConf.TheWakuNetworkConf() if len(conf.shards) != 0: - conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16]) + conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16]) else: - conf.pubsubTopics = twnClusterConf.pubsubTopics + conf.pubsubTopics = twnNetworkConf.pubsubTopics # Override configuration - conf.maxMessageSize = twnClusterConf.maxMessageSize - conf.clusterId = twnClusterConf.clusterId - conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress - conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic - conf.discv5Discovery = twnClusterConf.discv5Discovery + conf.maxMessageSize = twnNetworkConf.maxMessageSize + conf.clusterId = twnNetworkConf.clusterId + conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress + conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic + conf.discv5Discovery = twnNetworkConf.discv5Discovery conf.discv5BootstrapNodes = - conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes - conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec - conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit + conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes + conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec + conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit # Only set rlnRelay to true if relay is configured if conf.relay: - conf.rlnRelay = twnClusterConf.rlnRelay + conf.rlnRelay = twnNetworkConf.rlnRelay debug "Starting node" var waku = Waku.new(conf).valueOr: diff --git a/library/events/json_waku_not_responding_event.nim 
b/library/events/json_waku_not_responding_event.nim new file mode 100644 index 000000000..1e1d5fcc5 --- /dev/null +++ b/library/events/json_waku_not_responding_event.nim @@ -0,0 +1,9 @@ +import system, std/json, ./json_base_event + +type JsonWakuNotRespondingEvent* = ref object of JsonEvent + +proc new*(T: type JsonWakuNotRespondingEvent): T = + return JsonWakuNotRespondingEvent(eventType: "waku_not_responding") + +method `$`*(event: JsonWakuNotRespondingEvent): string = + $(%*event) diff --git a/library/libwaku.h b/library/libwaku.h index 3c15b36f9..b5d6c9bab 100644 --- a/library/libwaku.h +++ b/library/libwaku.h @@ -45,6 +45,8 @@ int waku_version(void* ctx, WakuCallBack callback, void* userData); +// Sets a callback that will be invoked whenever an event occurs. +// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. void waku_set_event_callback(void* ctx, WakuCallBack callback, void* userData); @@ -150,6 +152,10 @@ int waku_disconnect_peer_by_id(void* ctx, WakuCallBack callback, void* userData); +int waku_disconnect_all_peers(void* ctx, + WakuCallBack callback, + void* userData); + int waku_dial_peer(void* ctx, const char* peerMultiAddr, const char* protocol, @@ -221,6 +227,10 @@ int waku_get_my_peerid(void* ctx, WakuCallBack callback, void* userData); +int waku_get_metrics(void* ctx, + WakuCallBack callback, + void* userData); + int waku_peer_exchange_request(void* ctx, int numPeers, WakuCallBack callback, @@ -232,6 +242,10 @@ int waku_ping_peer(void* ctx, WakuCallBack callback, void* userData); +int waku_is_online(void* ctx, + WakuCallBack callback, + void* userData); + #ifdef __cplusplus } #endif diff --git a/library/libwaku.nim b/library/libwaku.nim index 48df3e2c6..ad3afa134 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -15,19 +15,18 @@ import waku/waku_core/topics/pubsub_topic, waku/waku_core/subscription/push_handler, waku/waku_relay, - ./events/ - [json_message_event, json_topic_health_change_event, json_connection_change_event], - ./waku_thread/waku_thread, - ./waku_thread/inter_thread_communication/requests/node_lifecycle_request, - ./waku_thread/inter_thread_communication/requests/peer_manager_request, - ./waku_thread/inter_thread_communication/requests/protocols/relay_request, - ./waku_thread/inter_thread_communication/requests/protocols/store_request, - ./waku_thread/inter_thread_communication/requests/protocols/lightpush_request, - ./waku_thread/inter_thread_communication/requests/protocols/filter_request, - ./waku_thread/inter_thread_communication/requests/debug_node_request, - ./waku_thread/inter_thread_communication/requests/discovery_request, - ./waku_thread/inter_thread_communication/requests/ping_request, - ./waku_thread/inter_thread_communication/waku_thread_request, + ./events/json_message_event, + ./waku_context, + ./waku_thread_requests/requests/node_lifecycle_request, + ./waku_thread_requests/requests/peer_manager_request, + ./waku_thread_requests/requests/protocols/relay_request, + ./waku_thread_requests/requests/protocols/store_request, + ./waku_thread_requests/requests/protocols/lightpush_request, + ./waku_thread_requests/requests/protocols/filter_request, + ./waku_thread_requests/requests/debug_node_request, + ./waku_thread_requests/requests/discovery_request, + ./waku_thread_requests/requests/ping_request, + ./waku_thread_requests/waku_thread_request, ./alloc, ./ffi_types, ../waku/factory/app_callbacks @@ -48,25 +47,6 @@ template checkLibwakuParams*( if isNil(callback): return RET_MISSING_CALLBACK 
-template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) = - if isNil(ctx[].eventCallback): - error eventName & " - eventCallback is nil" - return - - foreignThreadGc: - try: - let event = body - cast[WakuCallBack](ctx[].eventCallback)( - RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData - ) - except Exception, CatchableError: - let msg = - "Exception " & eventName & " when calling 'eventCallBack': " & - getCurrentExceptionMsg() - cast[WakuCallBack](ctx[].eventCallback)( - RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData - ) - proc handleRequest( ctx: ptr WakuContext, requestType: RequestType, @@ -74,28 +54,13 @@ proc handleRequest( callback: WakuCallBack, userData: pointer, ): cint = - waku_thread.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr: + waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr: let msg = "libwaku error: " & $error callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return RET_ERR return RET_OK -proc onConnectionChange(ctx: ptr WakuContext): ConnectionChangeHandler = - return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} = - callEventCallback(ctx, "onConnectionChange"): - $JsonConnectionChangeEvent.new($peerId, peerEvent) - -proc onReceivedMessage(ctx: ptr WakuContext): WakuRelayHandler = - return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = - callEventCallback(ctx, "onReceivedMessage"): - $JsonMessageEvent.new(pubsubTopic, msg) - -proc onTopicHealthChange(ctx: ptr WakuContext): TopicHealthChangeHandler = - return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} = - callEventCallback(ctx, "onTopicHealthChange"): - $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth) - ### End of not-exported components ################################################################################ @@ -146,8 +111,8 @@ proc waku_new( return nil ## Create the Waku thread that will keep waiting for req from the main thread. 
- var ctx = waku_thread.createWakuThread().valueOr: - let msg = "Error in createWakuThread: " & $error + var ctx = waku_context.createWakuContext().valueOr: + let msg = "Error in createWakuContext: " & $error callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return nil @@ -180,7 +145,7 @@ proc waku_destroy( initializeLibrary() checkLibwakuParams(ctx, callback, userData) - waku_thread.destroyWakuThread(ctx).isOkOr: + waku_context.destroyWakuContext(ctx).isOkOr: let msg = "libwaku error: " & $error callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return RET_ERR @@ -580,6 +545,20 @@ proc waku_disconnect_peer_by_id( userData, ) +proc waku_disconnect_all_peers( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS), + callback, + userData, + ) + proc waku_dial_peer( ctx: ptr WakuContext, peerMultiAddr: cstring, @@ -782,6 +761,20 @@ proc waku_get_my_peerid( userData, ) +proc waku_get_metrics( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS), + callback, + userData, + ) + proc waku_start_discv5( ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer ): cint {.dynlib, exportc.} = @@ -842,5 +835,19 @@ proc waku_ping_peer( userData, ) +proc waku_is_online( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE), + callback, + userData, + ) + ### End of exported procs ################################################################################ diff --git a/library/waku_context.nim b/library/waku_context.nim new file mode 100644 index 000000000..2dd9e9c95 --- /dev/null +++ b/library/waku_context.nim @@ -0,0 +1,219 @@ +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} +{.passc: "-fPIC".} + +import std/[options, atomics, os, net, locks] +import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results +import + waku/factory/waku, + waku/node/peer_manager, + waku/waku_relay/[protocol, topic_health], + waku/waku_core/[topics/pubsub_topic, message], + ./waku_thread_requests/[waku_thread_request, requests/debug_node_request], + ./ffi_types, + ./events/[ + json_message_event, json_topic_health_change_event, json_connection_change_event, + json_waku_not_responding_event, + ] + +type WakuContext* = object + wakuThread: Thread[(ptr WakuContext)] + watchdogThread: Thread[(ptr WakuContext)] + # monitors the Waku thread and notifies the Waku SDK consumer if it hangs + lock: Lock + reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest] + reqSignal: ThreadSignalPtr + # to inform The Waku Thread (a.k.a TWT) that a new request is sent + reqReceivedSignal: ThreadSignalPtr + # to inform the main thread that the request is rx by TWT + userData*: pointer + eventCallback*: pointer + eventUserdata*: pointer + running: Atomic[bool] # To control when the threads are running + +const git_version* 
{.strdefine.} = "n/a" +const versionString = "version / git commit hash: " & waku.git_version + +template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) = + if isNil(ctx[].eventCallback): + error eventName & " - eventCallback is nil" + return + + foreignThreadGc: + try: + let event = body + cast[WakuCallBack](ctx[].eventCallback)( + RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData + ) + except Exception, CatchableError: + let msg = + "Exception " & eventName & " when calling 'eventCallBack': " & + getCurrentExceptionMsg() + cast[WakuCallBack](ctx[].eventCallback)( + RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData + ) + +proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler = + return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} = + callEventCallback(ctx, "onConnectionChange"): + $JsonConnectionChangeEvent.new($peerId, peerEvent) + +proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler = + return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = + callEventCallback(ctx, "onReceivedMessage"): + $JsonMessageEvent.new(pubsubTopic, msg) + +proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler = + return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} = + callEventCallback(ctx, "onTopicHealthChange"): + $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth) + +proc onWakuNotResponding*(ctx: ptr WakuContext) = + callEventCallback(ctx, "onWakuNotResponsive"): + $JsonWakuNotRespondingEvent.new() + +proc sendRequestToWakuThread*( + ctx: ptr WakuContext, + reqType: RequestType, + reqContent: pointer, + callback: WakuCallBack, + userData: pointer, + timeout = InfiniteDuration, +): Result[void, string] = + ctx.lock.acquire() + # This lock is only necessary while we use a SP Channel and while the signalling + # between threads assumes that there aren't concurrent requests. + # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive + # requests concurrently and spare us the need of locks + defer: + ctx.lock.release() + + let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData) + ## Sending the request + let sentOk = ctx.reqChannel.trySend(req) + if not sentOk: + deallocShared(req) + return err("Couldn't send a request to the waku thread: " & $req[]) + + let fireSyncRes = ctx.reqSignal.fireSync() + if fireSyncRes.isErr(): + deallocShared(req) + return err("failed fireSync: " & $fireSyncRes.error) + + if fireSyncRes.get() == false: + deallocShared(req) + return err("Couldn't fireSync in time") + + ## wait until the Waku Thread properly received the request + let res = ctx.reqReceivedSignal.waitSync(timeout) + if res.isErr(): + deallocShared(req) + return err("Couldn't receive reqReceivedSignal signal") + + ## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the + ## process proc. See the 'waku_thread_request.nim' module for more details. + ok() + +proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} = + ## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs. 
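Editorial aside (illustration only, not part of the diff): `sendRequestToWakuThread` now takes an optional `timeout`; with the default `InfiniteDuration` the caller blocks until the Waku thread acknowledges the request via `reqReceivedSignal`, while a finite timeout lets a caller treat a missing acknowledgement as a hung Waku thread. A sketch of that pattern, essentially what the watchdog body below does, with a hypothetical `noopCallback`:

  let res = sendRequestToWakuThread(
    ctx,
    RequestType.DEBUG,
    DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED),
    noopCallback,
    nil,
    timeout = 3.seconds,
  )
  if res.isErr():
    # Either the channel/signal plumbing failed or the Waku thread did not
    # acknowledge within the timeout; notify the library consumer.
    onWakuNotResponding(ctx)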
+ + let watchdogRun = proc(ctx: ptr WakuContext) {.async.} = + const WatchdogTimeinterval = 1.seconds + const WakuNotRespondingTimeout = 3.seconds + while true: + await sleepAsync(WatchdogTimeinterval) + + if ctx.running.load == false: + debug "Watchdog thread exiting because WakuContext is not running" + break + + let wakuCallback = proc( + callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer + ) {.cdecl, gcsafe, raises: [].} = + discard ## Don't do anything. Just respecting the callback signature. + const nilUserData = nil + + trace "Sending watchdog request to Waku thread" + + sendRequestToWakuThread( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED), + wakuCallback, + nilUserData, + WakuNotRespondingTimeout, + ).isOkOr: + error "Failed to send watchdog request to Waku thread", error = $error + onWakuNotResponding(ctx) + + waitFor watchdogRun(ctx) + +proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} = + ## Waku thread that attends library user requests (stop, connect_to, etc.) + + let wakuRun = proc(ctx: ptr WakuContext) {.async.} = + var waku: Waku + while true: + await ctx.reqSignal.wait() + + if ctx.running.load == false: + break + + ## Trying to get a request from the libwaku requestor thread + var request: ptr WakuThreadRequest + let recvOk = ctx.reqChannel.tryRecv(request) + if not recvOk: + error "waku thread could not receive a request" + continue + + let fireRes = ctx.reqReceivedSignal.fireSync() + if fireRes.isErr(): + error "could not fireSync back to requester thread", error = fireRes.error + + ## Handle the request + asyncSpawn WakuThreadRequest.process(request, addr waku) + + waitFor wakuRun(ctx) + +proc createWakuContext*(): Result[ptr WakuContext, string] = + ## This proc is called from the main thread and it creates + ## the Waku working thread. 
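Editorial aside (illustration only, not part of the diff): `createWakuContext` (whose body follows) now spawns two threads, the Waku worker and the watchdog, and `destroyWakuContext` below stops and joins both. A minimal caller-side lifecycle, with error handling abbreviated:

  let ctx = createWakuContext().valueOr:
    quit("cannot create Waku context: " & error)
  # ... issue requests with sendRequestToWakuThread(ctx, ...) ...
  destroyWakuContext(ctx).isOkOr:
    quit("cannot destroy Waku context: " & error)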
+ var ctx = createShared(WakuContext, 1) + ctx.reqSignal = ThreadSignalPtr.new().valueOr: + return err("couldn't create reqSignal ThreadSignalPtr") + ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: + return err("couldn't create reqReceivedSignal ThreadSignalPtr") + ctx.lock.initLock() + + ctx.running.store(true) + + try: + createThread(ctx.wakuThread, wakuThreadBody, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err("failed to create the Waku thread: " & getCurrentExceptionMsg()) + + try: + createThread(ctx.watchdogThread, watchdogThreadBody, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err("failed to create the watchdog thread: " & getCurrentExceptionMsg()) + + return ok(ctx) + +proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] = + ctx.running.store(false) + + let signaledOnTime = ctx.reqSignal.fireSync().valueOr: + return err("error in destroyWakuContext: " & $error) + if not signaledOnTime: + return err("failed to signal reqSignal on time in destroyWakuContext") + + joinThread(ctx.wakuThread) + joinThread(ctx.watchdogThread) + ctx.lock.deinitLock() + ?ctx.reqSignal.close() + ?ctx.reqReceivedSignal.close() + freeShared(ctx) + + return ok() diff --git a/library/waku_thread/waku_thread.nim b/library/waku_thread/waku_thread.nim deleted file mode 100644 index 640389e32..000000000 --- a/library/waku_thread/waku_thread.nim +++ /dev/null @@ -1,132 +0,0 @@ -{.pragma: exported, exportc, cdecl, raises: [].} -{.pragma: callback, cdecl, raises: [], gcsafe.} -{.passc: "-fPIC".} - -import std/[options, atomics, os, net, locks] -import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results -import waku/factory/waku, ./inter_thread_communication/waku_thread_request, ../ffi_types - -type WakuContext* = object - thread: Thread[(ptr WakuContext)] - lock: Lock - reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest] - reqSignal: ThreadSignalPtr - # to inform The Waku Thread (a.k.a TWT) that a new request is sent - reqReceivedSignal: ThreadSignalPtr - # to inform the main thread that the request is rx by TWT - userData*: pointer - eventCallback*: pointer - eventUserdata*: pointer - running: Atomic[bool] # To control when the thread is running - -const git_version* {.strdefine.} = "n/a" -const versionString = "version / git commit hash: " & waku.git_version - -proc runWaku(ctx: ptr WakuContext) {.async.} = - ## This is the worker body. This runs the Waku node - ## and attends library user requests (stop, connect_to, etc.) - - var waku: Waku - - while true: - await ctx.reqSignal.wait() - - if ctx.running.load == false: - break - - ## Trying to get a request from the libwaku requestor thread - var request: ptr WakuThreadRequest - let recvOk = ctx.reqChannel.tryRecv(request) - if not recvOk: - error "waku thread could not receive a request" - continue - - let fireRes = ctx.reqReceivedSignal.fireSync() - if fireRes.isErr(): - error "could not fireSync back to requester thread", error = fireRes.error - - ## Handle the request - asyncSpawn WakuThreadRequest.process(request, addr waku) - -proc run(ctx: ptr WakuContext) {.thread.} = - ## Launch waku worker - waitFor runWaku(ctx) - -proc createWakuThread*(): Result[ptr WakuContext, string] = - ## This proc is called from the main thread and it creates - ## the Waku working thread. 
- var ctx = createShared(WakuContext, 1) - ctx.reqSignal = ThreadSignalPtr.new().valueOr: - return err("couldn't create reqSignal ThreadSignalPtr") - ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: - return err("couldn't create reqReceivedSignal ThreadSignalPtr") - ctx.lock.initLock() - - ctx.running.store(true) - - try: - createThread(ctx.thread, run, ctx) - except ValueError, ResourceExhaustedError: - # and freeShared for typed allocations! - freeShared(ctx) - - return err("failed to create the Waku thread: " & getCurrentExceptionMsg()) - - return ok(ctx) - -proc destroyWakuThread*(ctx: ptr WakuContext): Result[void, string] = - ctx.running.store(false) - - let signaledOnTime = ctx.reqSignal.fireSync().valueOr: - return err("error in destroyWakuThread: " & $error) - if not signaledOnTime: - return err("failed to signal reqSignal on time in destroyWakuThread") - - joinThread(ctx.thread) - ctx.lock.deinitLock() - ?ctx.reqSignal.close() - ?ctx.reqReceivedSignal.close() - freeShared(ctx) - - return ok() - -proc sendRequestToWakuThread*( - ctx: ptr WakuContext, - reqType: RequestType, - reqContent: pointer, - callback: WakuCallBack, - userData: pointer, -): Result[void, string] = - let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData) - - # This lock is only necessary while we use a SP Channel and while the signalling - # between threads assumes that there aren't concurrent requests. - # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive - # requests concurrently and spare us the need of locks - ctx.lock.acquire() - defer: - ctx.lock.release() - ## Sending the request - let sentOk = ctx.reqChannel.trySend(req) - if not sentOk: - deallocShared(req) - return err("Couldn't send a request to the waku thread: " & $req[]) - - let fireSyncRes = ctx.reqSignal.fireSync() - if fireSyncRes.isErr(): - deallocShared(req) - return err("failed fireSync: " & $fireSyncRes.error) - - if fireSyncRes.get() == false: - deallocShared(req) - return err("Couldn't fireSync in time") - - ## wait until the Waku Thread properly received the request - let res = ctx.reqReceivedSignal.waitSync() - if res.isErr(): - deallocShared(req) - return err("Couldn't receive reqReceivedSignal signal") - - ## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the - ## process proc. 
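Editorial aside (not part of the diff): compared with this deleted `waku_thread.nim`, the new `waku_context.nim` above keeps the same SPSC-channel/signal handshake but renames the API (`createWakuThread`/`destroyWakuThread` become `createWakuContext`/`destroyWakuContext`), adds an optional acknowledgement timeout to `sendRequestToWakuThread`, and spawns the additional watchdog thread that reports a blocked Waku thread through `onWakuNotResponding`.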
- ok() diff --git a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim b/library/waku_thread_requests/requests/debug_node_request.nim similarity index 62% rename from library/waku_thread/inter_thread_communication/requests/debug_node_request.nim rename to library/waku_thread_requests/requests/debug_node_request.nim index 53715e0ed..c9aa5a743 100644 --- a/library/waku_thread/inter_thread_communication/requests/debug_node_request.nim +++ b/library/waku_thread_requests/requests/debug_node_request.nim @@ -1,11 +1,24 @@ import std/json -import chronicles, chronos, results, eth/p2p/discoveryv5/enr, strutils, libp2p/peerid -import ../../../../waku/factory/waku, ../../../../waku/node/waku_node +import + chronicles, + chronos, + results, + eth/p2p/discoveryv5/enr, + strutils, + libp2p/peerid, + metrics +import + ../../../waku/factory/waku, + ../../../waku/node/waku_node, + ../../../waku/node/health_monitor type DebugNodeMsgType* = enum RETRIEVE_LISTENING_ADDRESSES RETRIEVE_MY_ENR RETRIEVE_MY_PEER_ID + RETRIEVE_METRICS + RETRIEVE_ONLINE_STATE + CHECK_WAKU_NOT_BLOCKED type DebugNodeRequest* = object operation: DebugNodeMsgType @@ -21,6 +34,10 @@ proc destroyShared(self: ptr DebugNodeRequest) = proc getMultiaddresses(node: WakuNode): seq[string] = return node.info().listenAddresses +proc getMetrics(): string = + {.gcsafe.}: + return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module + proc process*( self: ptr DebugNodeRequest, waku: Waku ): Future[Result[string, string]] {.async.} = @@ -35,6 +52,12 @@ proc process*( return ok(waku.node.enr.toURI()) of RETRIEVE_MY_PEER_ID: return ok($waku.node.peerId()) + of RETRIEVE_METRICS: + return ok(getMetrics()) + of RETRIEVE_ONLINE_STATE: + return ok($waku.healthMonitor.onlineMonitor.amIOnline()) + of CHECK_WAKU_NOT_BLOCKED: + return ok("waku thread is not blocked") error "unsupported operation in DebugNodeRequest" return err("unsupported operation in DebugNodeRequest") diff --git a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim b/library/waku_thread_requests/requests/discovery_request.nim similarity index 95% rename from library/waku_thread/inter_thread_communication/requests/discovery_request.nim rename to library/waku_thread_requests/requests/discovery_request.nim index 4eb193728..8fec0dd9f 100644 --- a/library/waku_thread/inter_thread_communication/requests/discovery_request.nim +++ b/library/waku_thread_requests/requests/discovery_request.nim @@ -1,12 +1,12 @@ import std/json import chronos, chronicles, results, strutils, libp2p/multiaddress import - ../../../../waku/factory/waku, - ../../../../waku/discovery/waku_dnsdisc, - ../../../../waku/discovery/waku_discv5, - ../../../../waku/waku_core/peers, - ../../../../waku/node/waku_node, - ../../../alloc + ../../../waku/factory/waku, + ../../../waku/discovery/waku_dnsdisc, + ../../../waku/discovery/waku_discv5, + ../../../waku/waku_core/peers, + ../../../waku/node/waku_node, + ../../alloc type DiscoveryMsgType* = enum GET_BOOTSTRAP_NODES diff --git a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim b/library/waku_thread_requests/requests/node_lifecycle_request.nim similarity index 88% rename from library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim rename to library/waku_thread_requests/requests/node_lifecycle_request.nim index 8d504df89..21765838e 100644 --- a/library/waku_thread/inter_thread_communication/requests/node_lifecycle_request.nim +++ 
b/library/waku_thread_requests/requests/node_lifecycle_request.nim @@ -2,13 +2,14 @@ import std/[options, json, strutils, net] import chronos, chronicles, results, confutils, confutils/std/net import - ../../../../waku/node/peer_manager/peer_manager, - ../../../../waku/factory/external_config, - ../../../../waku/factory/waku, - ../../../../waku/factory/node_factory, - ../../../../waku/factory/networks_config, - ../../../../waku/factory/app_callbacks, - ../../../alloc + ../../../waku/node/peer_manager/peer_manager, + ../../../waku/factory/external_config, + ../../../waku/factory/waku, + ../../../waku/factory/node_factory, + ../../../waku/factory/networks_config, + ../../../waku/factory/app_callbacks, + ../../../waku/waku_api/rest/builder, + ../../alloc type NodeLifecycleMsgType* = enum CREATE_NODE @@ -73,9 +74,11 @@ proc createWaku( appCallbacks.topicHealthChangeHandler = nil # TODO: Convert `confJson` directly to `WakuConf` - let wakuConf = conf.toWakuConf().valueOr: + var wakuConf = conf.toWakuConf().valueOr: return err("Configuration error: " & $error) + wakuConf.restServerConf = none(RestServerConf) ## don't want REST in libwaku + let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr: error "waku initialization failed", error = error return err("Failed setting up Waku: " & $error) diff --git a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim b/library/waku_thread_requests/requests/peer_manager_request.nim similarity index 95% rename from library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim rename to library/waku_thread_requests/requests/peer_manager_request.nim index 1e5202891..a7e643a21 100644 --- a/library/waku_thread/inter_thread_communication/requests/peer_manager_request.nim +++ b/library/waku_thread_requests/requests/peer_manager_request.nim @@ -1,10 +1,10 @@ import std/[sequtils, strutils] import chronicles, chronos, results, options, json import - ../../../../waku/factory/waku, - ../../../../waku/node/waku_node, - ../../../alloc, - ../../../../waku/node/peer_manager + ../../../waku/factory/waku, + ../../../waku/node/waku_node, + ../../alloc, + ../../../waku/node/peer_manager type PeerManagementMsgType* {.pure.} = enum CONNECT_TO @@ -12,6 +12,7 @@ type PeerManagementMsgType* {.pure.} = enum GET_CONNECTED_PEERS_INFO GET_PEER_IDS_BY_PROTOCOL DISCONNECT_PEER_BY_ID + DISCONNECT_ALL_PEERS DIAL_PEER DIAL_PEER_BY_ID GET_CONNECTED_PEERS @@ -120,6 +121,9 @@ proc process*( return err($error) await waku.node.peerManager.disconnectNode(peerId) return ok("") + of DISCONNECT_ALL_PEERS: + await waku.node.peerManager.disconnectAllPeers() + return ok("") of DIAL_PEER: let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr: error "DIAL_PEER failed", error = $error diff --git a/library/waku_thread/inter_thread_communication/requests/ping_request.nim b/library/waku_thread_requests/requests/ping_request.nim similarity index 94% rename from library/waku_thread/inter_thread_communication/requests/ping_request.nim rename to library/waku_thread_requests/requests/ping_request.nim index 4467f9659..53d33968e 100644 --- a/library/waku_thread/inter_thread_communication/requests/ping_request.nim +++ b/library/waku_thread_requests/requests/ping_request.nim @@ -1,7 +1,7 @@ import std/[json, strutils] import chronos, results import libp2p/[protocols/ping, switch, multiaddress, multicodec] -import ../../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../../alloc +import ../../../waku/[factory/waku, waku_core/peers, 
node/waku_node], ../../alloc type PingRequest* = object peerAddr: cstring diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim b/library/waku_thread_requests/requests/protocols/filter_request.nim similarity index 88% rename from library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim rename to library/waku_thread_requests/requests/protocols/filter_request.nim index 452a0c7c3..274ec32ea 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/filter_request.nim +++ b/library/waku_thread_requests/requests/protocols/filter_request.nim @@ -1,16 +1,16 @@ import options, std/[strutils, sequtils] import chronicles, chronos, results import - ../../../../../waku/waku_filter_v2/client, - ../../../../../waku/waku_core/message/message, - ../../../../../waku/factory/waku, - ../../../../../waku/waku_filter_v2/common, - ../../../../../waku/waku_core/subscription/push_handler, - ../../../../../waku/node/peer_manager/peer_manager, - ../../../../../waku/node/waku_node, - ../../../../../waku/waku_core/topics/pubsub_topic, - ../../../../../waku/waku_core/topics/content_topic, - ../../../../alloc + ../../../../waku/waku_filter_v2/client, + ../../../../waku/waku_core/message/message, + ../../../../waku/factory/waku, + ../../../../waku/waku_filter_v2/common, + ../../../../waku/waku_core/subscription/push_handler, + ../../../../waku/node/peer_manager/peer_manager, + ../../../../waku/node/waku_node, + ../../../../waku/waku_core/topics/pubsub_topic, + ../../../../waku/waku_core/topics/content_topic, + ../../../alloc type FilterMsgType* = enum SUBSCRIBE diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim b/library/waku_thread_requests/requests/protocols/lightpush_request.nim similarity index 85% rename from library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim rename to library/waku_thread_requests/requests/protocols/lightpush_request.nim index f167cd239..bc3d9de2c 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/lightpush_request.nim +++ b/library/waku_thread_requests/requests/protocols/lightpush_request.nim @@ -1,16 +1,16 @@ import options import chronicles, chronos, results import - ../../../../../waku/waku_core/message/message, - ../../../../../waku/waku_core/codecs, - ../../../../../waku/factory/waku, - ../../../../../waku/waku_core/message, - ../../../../../waku/waku_core/time, # Timestamp - ../../../../../waku/waku_core/topics/pubsub_topic, - ../../../../../waku/waku_lightpush_legacy/client, - ../../../../../waku/waku_lightpush_legacy/common, - ../../../../../waku/node/peer_manager/peer_manager, - ../../../../alloc + ../../../../waku/waku_core/message/message, + ../../../../waku/waku_core/codecs, + ../../../../waku/factory/waku, + ../../../../waku/waku_core/message, + ../../../../waku/waku_core/time, # Timestamp + ../../../../waku/waku_core/topics/pubsub_topic, + ../../../../waku/waku_lightpush_legacy/client, + ../../../../waku/waku_lightpush_legacy/common, + ../../../../waku/node/peer_manager/peer_manager, + ../../../alloc type LightpushMsgType* = enum PUBLISH diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim b/library/waku_thread_requests/requests/protocols/relay_request.nim similarity index 91% rename from library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim rename to 
library/waku_thread_requests/requests/protocols/relay_request.nim index c2f002c44..279a1efb4 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/relay_request.nim +++ b/library/waku_thread_requests/requests/protocols/relay_request.nim @@ -1,16 +1,16 @@ import std/[net, sequtils, strutils] import chronicles, chronos, stew/byteutils, results import - ../../../../../waku/waku_core/message/message, - ../../../../../waku/factory/[external_config, validator_signed, waku], - ../../../../../waku/waku_node, - ../../../../../waku/waku_core/message, - ../../../../../waku/waku_core/time, # Timestamp - ../../../../../waku/waku_core/topics/pubsub_topic, - ../../../../../waku/waku_core/topics, - ../../../../../waku/waku_relay/protocol, - ../../../../../waku/node/peer_manager, - ../../../../alloc + ../../../../waku/waku_core/message/message, + ../../../../waku/factory/[external_config, validator_signed, waku], + ../../../../waku/waku_node, + ../../../../waku/waku_core/message, + ../../../../waku/waku_core/time, # Timestamp + ../../../../waku/waku_core/topics/pubsub_topic, + ../../../../waku/waku_core/topics, + ../../../../waku/waku_relay/protocol, + ../../../../waku/node/peer_manager, + ../../../alloc type RelayMsgType* = enum SUBSCRIBE @@ -111,7 +111,7 @@ proc process*( of SUBSCRIBE: waku.node.subscribe( (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic), - handler = some(self.relayEventCallback), + handler = self.relayEventCallback, ).isOkOr: error "SUBSCRIBE failed", error return err($error) diff --git a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim b/library/waku_thread_requests/requests/protocols/store_request.nim similarity index 92% rename from library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim rename to library/waku_thread_requests/requests/protocols/store_request.nim index 57786a581..3fe1e2f13 100644 --- a/library/waku_thread/inter_thread_communication/requests/protocols/store_request.nim +++ b/library/waku_thread_requests/requests/protocols/store_request.nim @@ -1,15 +1,15 @@ import std/[json, sugar, strutils, options] import chronos, chronicles, results, stew/byteutils import - ../../../../../waku/factory/waku, - ../../../../alloc, - ../../../../utils, - ../../../../../waku/waku_core/peers, - ../../../../../waku/waku_core/time, - ../../../../../waku/waku_core/message/digest, - ../../../../../waku/waku_store/common, - ../../../../../waku/waku_store/client, - ../../../../../waku/common/paging + ../../../../waku/factory/waku, + ../../../alloc, + ../../../utils, + ../../../../waku/waku_core/peers, + ../../../../waku/waku_core/time, + ../../../../waku/waku_core/message/digest, + ../../../../waku/waku_store/common, + ../../../../waku/waku_store/client, + ../../../../waku/common/paging type StoreReqType* = enum REMOTE_QUERY ## to perform a query to another Store node diff --git a/library/waku_thread/inter_thread_communication/waku_thread_request.nim b/library/waku_thread_requests/waku_thread_request.nim similarity index 98% rename from library/waku_thread/inter_thread_communication/waku_thread_request.nim rename to library/waku_thread_requests/waku_thread_request.nim index bcfb84198..50462fba7 100644 --- a/library/waku_thread/inter_thread_communication/waku_thread_request.nim +++ b/library/waku_thread_requests/waku_thread_request.nim @@ -5,8 +5,8 @@ import std/json, results import chronos, chronos/threadsync import - ../../../waku/factory/waku, - ../../ffi_types, + ../../waku/factory/waku, + 
../ffi_types, ./requests/node_lifecycle_request, ./requests/peer_manager_request, ./requests/protocols/relay_request, diff --git a/scripts/build_rln.sh b/scripts/build_rln.sh index 1cf9b9879..cd2fa3827 100755 --- a/scripts/build_rln.sh +++ b/scripts/build_rln.sh @@ -19,15 +19,14 @@ host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}') tarball="${host_triplet}" -# use arkzkey feature for v0.5.1 +# use arkzkey feature for v0.7.0 # TODO: update this script in the future when arkzkey is default -if [[ "${rln_version}" == "v0.5.1" ]]; then +if [[ "${rln_version}" == "v0.7.0" ]]; then tarball+="-arkzkey-rln.tar.gz" else tarball+="-rln.tar.gz" fi - # Download the prebuilt rln library if it is available if curl --silent --fail-with-body -L \ "https://github.com/vacp2p/zerokit/releases/download/$rln_version/$tarball" \ diff --git a/scripts/build_wakunode_windows.sh b/scripts/build_windows.sh similarity index 88% rename from scripts/build_wakunode_windows.sh rename to scripts/build_windows.sh index ef0881836..e56fb8871 100755 --- a/scripts/build_wakunode_windows.sh +++ b/scripts/build_windows.sh @@ -36,25 +36,28 @@ cd ../../../.. echo "6. -.-.-.- Building libunwind -.-.-.-" cd vendor/nim-libbacktrace -execute_command "make all V=1" -execute_command "make install/usr/lib/libunwind.a V=1" +execute_command "make all V=1 -j8" +execute_command "make install/usr/lib/libunwind.a V=1 -j8" cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib cd ../../ echo "7. -.-.-.- Building miniupnpc -.-.-.- " cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc execute_command "git checkout little_chore_windows_support" -execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1" +execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 -j8" cd ../../../../.. echo "8. -.-.-.- Building libnatpmp -.-.-.- " cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream -make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 +make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 -j8 cd ../../../../ echo "9. -.-.-.- Building wakunode2 -.-.-.- " execute_command "make wakunode2 LOG_LEVEL=DEBUG V=1 -j8" +echo "10. -.-.-.- Building libwaku -.-.-.- " +execute_command "make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j8" + echo "Windows setup completed successfully!" echo "✓ Successful commands: $success_count" echo "✗ Failed commands: $failure_count" diff --git a/scripts/install_anvil.sh b/scripts/install_anvil.sh index 13d5f8dfd..1bf4bd7b1 100755 --- a/scripts/install_anvil.sh +++ b/scripts/install_anvil.sh @@ -2,13 +2,14 @@ # Install Anvil +if ! 
command -v anvil &> /dev/null; then + BASE_DIR="${XDG_CONFIG_HOME:-$HOME}" + FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}" + FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin" -BASE_DIR="${XDG_CONFIG_HOME:-$HOME}" -FOUNDRY_DIR="${FOUNDRY_DIR-"$BASE_DIR/.foundry"}" -FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin" - -curl -L https://foundry.paradigm.xyz | bash -# Extract the source path from the download result -echo "foundryup_path: $FOUNDRY_BIN_DIR" -# run foundryup -$FOUNDRY_BIN_DIR/foundryup \ No newline at end of file + curl -L https://foundry.paradigm.xyz | bash + # Extract the source path from the download result + echo "foundryup_path: $FOUNDRY_BIN_DIR" + # run foundryup + $FOUNDRY_BIN_DIR/foundryup +fi \ No newline at end of file diff --git a/scripts/install_pnpm.sh b/scripts/install_pnpm.sh new file mode 100755 index 000000000..34ba47b07 --- /dev/null +++ b/scripts/install_pnpm.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Install pnpm +if ! command -v pnpm &> /dev/null; then + echo "pnpm is not installed, installing it now..." + npm i pnpm --global +fi + diff --git a/scripts/install_rln_tests_dependencies.sh b/scripts/install_rln_tests_dependencies.sh new file mode 100755 index 000000000..e19e0ef3c --- /dev/null +++ b/scripts/install_rln_tests_dependencies.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Install Anvil +./scripts/install_anvil.sh + +#Install pnpm +./scripts/install_pnpm.sh \ No newline at end of file diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 07e0cd895..aac92863a 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -38,7 +38,8 @@ when os == "Linux" and #./waku_archive_legacy/test_driver_postgres_query, #./waku_archive_legacy/test_driver_postgres, ./factory/test_node_factory, - ./wakunode_rest/test_rest_store + ./wakunode_rest/test_rest_store, + ./wakunode_rest/test_all # Waku store test suite import @@ -91,21 +92,7 @@ import # Waku Keystore test suite import ./test_waku_keystore_keyfile, ./test_waku_keystore -## Wakunode Rest API test suite -import - ./wakunode_rest/test_rest_debug, - ./wakunode_rest/test_rest_debug_serdes, - ./wakunode_rest/test_rest_relay, - ./wakunode_rest/test_rest_relay_serdes, - ./wakunode_rest/test_rest_serdes, - ./wakunode_rest/test_rest_filter, - ./wakunode_rest/test_rest_lightpush, - ./wakunode_rest/test_rest_lightpush_legacy, - ./wakunode_rest/test_rest_admin, - ./wakunode_rest/test_rest_cors, - ./wakunode_rest/test_rest_health - import ./waku_rln_relay/test_all # Node Factory -import ./factory/test_external_config +import ./factory/test_all diff --git a/tests/common/test_all.nim b/tests/common/test_all.nim index 7756f23ad..ae37337cd 100644 --- a/tests/common/test_all.nim +++ b/tests/common/test_all.nim @@ -1,3 +1,5 @@ +{.used.} + import ./test_base64_codec, ./test_confutils_envvar, diff --git a/tests/common/test_confutils_envvar.nim b/tests/common/test_confutils_envvar.nim index 676a35ae1..fca11cca6 100644 --- a/tests/common/test_confutils_envvar.nim +++ b/tests/common/test_confutils_envvar.nim @@ -3,7 +3,6 @@ import std/[os, options], results, - stew/shims/net as stewNet, testutils/unittests, confutils, confutils/defs, diff --git a/tests/common/test_enr_builder.nim b/tests/common/test_enr_builder.nim index 9fe8f6807..0cf7bcb55 100644 --- a/tests/common/test_enr_builder.nim +++ b/tests/common/test_enr_builder.nim @@ -1,6 +1,6 @@ {.used.} -import std/options, results, stew/shims/net, testutils/unittests +import std/[options, net], results, testutils/unittests import waku/common/enr, ../testlib/wakucore suite "nim-eth 
ENR - builder and typed record": diff --git a/tests/factory/test_all.nim b/tests/factory/test_all.nim new file mode 100644 index 000000000..683bc3b10 --- /dev/null +++ b/tests/factory/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_external_config, ./test_node_factory, ./test_waku_conf diff --git a/tests/factory/test_external_config.nim b/tests/factory/test_external_config.nim index 5bd4e2c86..ecd77826f 100644 --- a/tests/factory/test_external_config.nim +++ b/tests/factory/test_external_config.nim @@ -8,7 +8,8 @@ import libp2p/multiaddress, nimcrypto/utils, secp256k1, - confutils + confutils, + stint import ../../waku/factory/external_config, ../../waku/factory/networks_config, @@ -16,10 +17,46 @@ import ../../waku/common/logging, ../../waku/common/utils/parse_size_units -suite "Waku config - apply preset": - test "Default preset is TWN": +suite "Waku external config - default values": + test "Default sharding value": ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() + let defaultShardingMode = AutoSharding + let defaultNumShardsInCluster = 1.uint16 + let defaultSubscribeShards = @[0.uint16] + + ## Given + let preConfig = defaultWakuNodeConf().get() + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.shardingConf.kind == defaultShardingMode + check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster + check conf.subscribeShards == defaultSubscribeShards + + test "Default shards value in static sharding": + ## Setup + let defaultSubscribeShards: seq[uint16] = @[] + + ## Given + var preConfig = defaultWakuNodeConf().get() + preConfig.numShardsInNetwork = 0.uint16 + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.subscribeShards == defaultSubscribeShards + +suite "Waku external config - apply preset": + test "Preset is TWN": + ## Setup + let expectedConf = NetworkConf.TheWakuNetworkConf() ## Given let preConfig = WakuNodeConf( @@ -47,7 +84,9 @@ suite "Waku config - apply preset": check rlnRelayConf.chainId == expectedConf.rlnRelayChainId check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit - check conf.numShardsInNetwork == expectedConf.numShardsInNetwork + check conf.shardingConf.kind == expectedConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + expectedConf.shardingConf.numShardsInCluster check conf.discv5Conf.isSome() == expectedConf.discv5Discovery if conf.discv5Conf.isSome(): let discv5Conf = conf.discv5Conf.get() @@ -55,7 +94,7 @@ suite "Waku config - apply preset": test "Subscribes to all valid shards in twn": ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() + let expectedConf = NetworkConf.TheWakuNetworkConf() ## Given let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7] @@ -67,11 +106,11 @@ suite "Waku config - apply preset": ## Then let conf = res.get() - check conf.shards.len == expectedConf.numShardsInNetwork.int + check conf.subscribeShards.len == expectedConf.shardingConf.numShardsInCluster.int test "Subscribes to some valid shards in twn": ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() + let expectedConf = NetworkConf.TheWakuNetworkConf() ## Given let shards: seq[uint16] = @[0, 4, 7] @@ -83,9 +122,9 @@ suite "Waku config - apply preset": ## Then let conf = resConf.get() - assert conf.shards.len() == shards.len() + assert conf.subscribeShards.len() == 
shards.len() for index, shard in shards: - assert shard in conf.shards + assert shard in conf.subscribeShards test "Subscribes to invalid shards in twn": ## Setup @@ -102,7 +141,7 @@ suite "Waku config - apply preset": test "Apply TWN preset when cluster id = 1": ## Setup - let expectedConf = ClusterConf.TheWakuNetworkConf() + let expectedConf = NetworkConf.TheWakuNetworkConf() ## Given let preConfig = WakuNodeConf( @@ -130,13 +169,15 @@ suite "Waku config - apply preset": check rlnRelayConf.chainId == expectedConf.rlnRelayChainId check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit - check conf.numShardsInNetwork == expectedConf.numShardsInNetwork + check conf.shardingConf.kind == expectedConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + expectedConf.shardingConf.numShardsInCluster check conf.discv5Conf.isSome() == expectedConf.discv5Discovery if conf.discv5Conf.isSome(): let discv5Conf = conf.discv5Conf.get() check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes -suite "Waku config - node key": +suite "Waku external config - node key": test "Passed node key is used": ## Setup let nodeKeyStr = @@ -157,13 +198,13 @@ suite "Waku config - node key": assert utils.toHex(resKey.getRawBytes().get()) == utils.toHex(nodekey.getRawBytes().get()) -suite "Waku config - Shards": +suite "Waku external config - Shards": test "Shards are valid": ## Setup ## Given let shards: seq[uint16] = @[0, 2, 4] - let numShardsInNetwork = 5.uint32 + let numShardsInNetwork = 5.uint16 let wakuNodeConf = WakuNodeConf( cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork ) @@ -182,7 +223,7 @@ suite "Waku config - Shards": ## Given let shards: seq[uint16] = @[0, 2, 5] - let numShardsInNetwork = 5.uint32 + let numShardsInNetwork = 5.uint16 let wakuNodeConf = WakuNodeConf( cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork ) @@ -197,7 +238,7 @@ suite "Waku config - Shards": ## Setup ## Given - let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]) + let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"]) ## When let res = wakuNodeConf.toWakuConf() @@ -206,3 +247,15 @@ suite "Waku config - Shards": let wakuConf = res.get() let vRes = wakuConf.validate() assert vRes.isOk(), $vRes.error + + test "Imvalid shard is passed without num shards": + ## Setup + + ## Given + let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" diff --git a/tests/factory/test_waku_conf.nim b/tests/factory/test_waku_conf.nim index 6b7040dd5..7ecdb01bb 100644 --- a/tests/factory/test_waku_conf.nim +++ b/tests/factory/test_waku_conf.nim @@ -9,14 +9,14 @@ import testutils/unittests import waku/factory/waku_conf, - waku/factory/waku_conf_builder, + waku/factory/conf_builder/conf_builder, waku/factory/networks_config, waku/common/utils/parse_size_units suite "Waku Conf - build with cluster conf": test "Cluster Conf is passed and relay is enabled": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() builder.discv5Conf.withUdpPort(9000) builder.withRelayServiceRatio("50:50") @@ -24,8 +24,8 @@ suite "Waku Conf - build with cluster conf": let expectedShards = toSeq[0.uint16 .. 
7.uint16] ## Given - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") - builder.withClusterConf(clusterConf) + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) builder.withRelay(true) builder.rlnRelayConf.withTreePath("/tmp/test-tree-path") @@ -37,27 +37,29 @@ suite "Waku Conf - build with cluster conf": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check conf.clusterId == clusterConf.clusterId - check conf.numShardsInNetwork == clusterConf.numShardsInNetwork - check conf.shards == expectedShards + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards check conf.maxMessageSizeBytes == - uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) - check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes - if clusterConf.rlnRelay: + if networkConf.rlnRelay: assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled" let rlnRelayConf = conf.rlnRelayConf.get() check rlnRelayConf.ethContractAddress.string == - clusterConf.rlnRelayEthContractAddress - check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic - check rlnRelayConf.chainId == clusterConf.rlnRelayChainId - check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec - check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit + networkConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic + check rlnRelayConf.chainId == networkConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit test "Cluster Conf is passed, but relay is disabled": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() builder.withRelayServiceRatio("50:50") builder.discv5Conf.withUdpPort(9000) @@ -65,8 +67,8 @@ suite "Waku Conf - build with cluster conf": let expectedShards = toSeq[0.uint16 .. 
7.uint16] ## Given - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") - builder.withClusterConf(clusterConf) + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) builder.withRelay(false) ## When @@ -77,26 +79,28 @@ suite "Waku Conf - build with cluster conf": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check conf.clusterId == clusterConf.clusterId - check conf.numShardsInNetwork == clusterConf.numShardsInNetwork - check conf.shards == expectedShards + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards check conf.maxMessageSizeBytes == - uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) - check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes assert conf.rlnRelayConf.isNone test "Cluster Conf is passed, but rln relay is disabled": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() let # Mount all shards in network expectedShards = toSeq[0.uint16 .. 7.uint16] ## Given - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") - builder.withClusterConf(clusterConf) + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) builder.rlnRelayConf.withEnabled(false) ## When @@ -107,24 +111,26 @@ suite "Waku Conf - build with cluster conf": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check conf.clusterId == clusterConf.clusterId - check conf.numShardsInNetwork == clusterConf.numShardsInNetwork - check conf.shards == expectedShards + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards check conf.maxMessageSizeBytes == - uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) - check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes assert conf.rlnRelayConf.isNone test "Cluster Conf is passed and valid shards are specified": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() let shards = @[2.uint16, 3.uint16] ## Given - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") - builder.withClusterConf(clusterConf) - builder.withShards(shards) + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withSubscribeShards(shards) ## When let resConf = builder.build() @@ -134,23 +140,25 @@ suite "Waku Conf - build with cluster conf": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check conf.clusterId == clusterConf.clusterId - check conf.numShardsInNetwork == clusterConf.numShardsInNetwork - check conf.shards == shards + check conf.clusterId == 
networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == shards check conf.maxMessageSizeBytes == - uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) - check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes test "Cluster Conf is passed and invalid shards are specified": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() let shards = @[2.uint16, 10.uint16] ## Given - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") - builder.withClusterConf(clusterConf) - builder.withShards(shards) + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withSubscribeShards(shards) ## When let resConf = builder.build() @@ -158,11 +166,11 @@ suite "Waku Conf - build with cluster conf": ## Then assert resConf.isErr(), "Invalid shard was accepted" - test "Cluster Conf is passed and RLN contract is overridden": + test "Cluster Conf is passed and RLN contract is **not** overridden": ## Setup - let clusterConf = ClusterConf.TheWakuNetworkConf() + let networkConf = NetworkConf.TheWakuNetworkConf() var builder = WakuConfBuilder.init() - builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/") + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) # Mount all shards in network let expectedShards = toSeq[0.uint16 .. 7.uint16] @@ -170,7 +178,7 @@ suite "Waku Conf - build with cluster conf": ## Given builder.rlnRelayConf.withEthContractAddress(contractAddress) - builder.withClusterConf(clusterConf) + builder.withNetworkConf(networkConf) builder.withRelay(true) builder.rlnRelayConf.withTreePath("/tmp/test") @@ -182,23 +190,26 @@ suite "Waku Conf - build with cluster conf": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check conf.clusterId == clusterConf.clusterId - check conf.numShardsInNetwork == clusterConf.numShardsInNetwork - check conf.shards == expectedShards + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards check conf.maxMessageSizeBytes == - uint64(parseCorrectMsgSize(clusterConf.maxMessageSize)) - check conf.discv5Conf.isSome == clusterConf.discv5Discovery - check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.isSome == networkConf.discv5Discovery + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes - if clusterConf.rlnRelay: + if networkConf.rlnRelay: assert conf.rlnRelayConf.isSome let rlnRelayConf = conf.rlnRelayConf.get() - check rlnRelayConf.ethContractAddress.string == contractAddress - check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic - check rlnRelayConf.chainId == clusterConf.rlnRelayChainId - check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec - check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit + check rlnRelayConf.ethContractAddress.string == + 
networkConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic + check rlnRelayConf.chainId == networkConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit suite "Waku Conf - node key": test "Node key is generated": @@ -263,10 +274,25 @@ suite "Waku Conf - extMultiaddrs": ## Then let resValidate = conf.validate() assert resValidate.isOk(), $resValidate.error - check multiaddrs.len == conf.networkConf.extMultiAddrs.len - let resMultiaddrs = conf.networkConf.extMultiAddrs.map( + check multiaddrs.len == conf.endpointConf.extMultiAddrs.len + let resMultiaddrs = conf.endpointConf.extMultiAddrs.map( proc(m: MultiAddress): string = $m ) for m in multiaddrs: check m in resMultiaddrs + +suite "Waku Conf Builder - rate limits": + test "Valid rate limit passed via string": + ## Setup + var builder = RateLimitConfBuilder.init() + + ## Given + let rateLimitsStr = @["lightpush:2/2ms", "10/2m", "store: 3/3s"] + builder.withRateLimits(rateLimitsStr) + + ## When + let res = builder.build() + + ## Then + assert res.isOk(), $res.error diff --git a/tests/incentivization/test_all.nim b/tests/incentivization/test_all.nim index 4657ea0d3..dc488c4da 100644 --- a/tests/incentivization/test_all.nim +++ b/tests/incentivization/test_all.nim @@ -1 +1,3 @@ +{.used.} + import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation diff --git a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim index 1edd9243e..c0e25ec6a 100644 --- a/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim +++ b/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim @@ -3,7 +3,6 @@ import testutils/unittests, libp2p/[multiaddress, peerid], libp2p/crypto/crypto, - stew/shims/net, eth/keys, eth/p2p/discoveryv5/enr, nimcrypto/utils diff --git a/tests/node/peer_manager/test_peer_manager.nim b/tests/node/peer_manager/test_peer_manager.nim index 6eddda0d6..ad1f1bf0e 100644 --- a/tests/node/peer_manager/test_peer_manager.nim +++ b/tests/node/peer_manager/test_peer_manager.nim @@ -1,9 +1,4 @@ -import - chronicles, - std/[options, tables, strutils], - stew/shims/net, - chronos, - testutils/unittests +import chronicles, std/[options, tables, strutils], chronos, testutils/unittests import waku/node/waku_node, @@ -23,7 +18,7 @@ suite "Peer Manager": asyncSetup: listenPort = Port(0) - listenAddress = ValidIpAddress.init("0.0.0.0") + listenAddress = parseIpAddress("0.0.0.0") serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() clusterId = 1 diff --git a/tests/node/test_all.nim b/tests/node/test_all.nim index 4840f49a2..f6e7507b7 100644 --- a/tests/node/test_all.nim +++ b/tests/node/test_all.nim @@ -1,3 +1,5 @@ +{.used.} + import ./test_wakunode_filter, ./test_wakunode_legacy_lightpush, diff --git a/tests/node/test_wakunode_filter.nim b/tests/node/test_wakunode_filter.nim index bf9f2495b..abf555b68 100644 --- a/tests/node/test_wakunode_filter.nim +++ b/tests/node/test_wakunode_filter.nim @@ -2,7 +2,6 @@ import std/[options, tables, sequtils, strutils, sets], - stew/shims/net as stewNet, testutils/unittests, chronos, chronicles, diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index dfc306de8..5d01e9f58 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -1,8 +1,7 @@ {.used.} import 
- std/[options, tempfiles], - stew/shims/net as stewNet, + std/[options, tempfiles, net], testutils/unittests, chronos, std/strformat, @@ -46,8 +45,8 @@ suite "Waku Legacy Lightpush - End To End": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) await allFutures(server.start(), client.start()) await server.start() @@ -70,7 +69,7 @@ suite "Waku Legacy Lightpush - End To End": asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node": # Given a light lightpush client let lightpushClient = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) lightpushClient.mountLegacyLightpushClient() # When the client publishes a message @@ -129,8 +128,8 @@ suite "RLN Proofs as a Lightpush Service": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) # mount rln-relay let wakuRlnConfig = WakuRlnConfig( @@ -162,7 +161,7 @@ suite "RLN Proofs as a Lightpush Service": asyncTest "Message is published when RLN enabled": # Given a light lightpush client let lightpushClient = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) lightpushClient.mountLegacyLightPushClient() # When the client publishes a message @@ -190,9 +189,9 @@ suite "Waku Legacy Lightpush message delivery": await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) - (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr: + (await destNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" - (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr: + (await bridgeNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" await bridgeNode.mountLegacyLightPush() lightNode.mountLegacyLightPushClient() @@ -215,7 +214,7 @@ suite "Waku Legacy Lightpush message delivery": msg == message completionFutRelay.complete(true) - destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr: + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to topic:" & $error # Wait for subscription to take effect diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim index 8ede3f6f2..beed3c1c6 100644 --- a/tests/node/test_wakunode_legacy_store.nim +++ b/tests/node/test_wakunode_legacy_store.nim @@ -1,11 +1,6 @@ {.used.} -import - std/options, - stew/shims/net as stewNet, - testutils/unittests, - chronos, - libp2p/crypto/crypto +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -66,8 +61,8 @@ suite "Waku Store - End to End - Sorted Archive": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = 
newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages) let mountArchiveResult = server.mountLegacyArchive(archiveDriver) @@ -440,7 +435,7 @@ suite "Waku Store - End to End - Sorted Archive": newArchiveDriverWithMessages(pubsubTopic, archiveMessages) otherServerKey = generateSecp256k1Key() otherServer = - newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountOtherArchiveResult = otherServer.mountLegacyArchive(otherArchiveDriverWithMessages) assert mountOtherArchiveResult.isOk() @@ -522,8 +517,8 @@ suite "Waku Store - End to End - Unsorted Archive": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) let unsortedArchiveDriverWithMessages = @@ -678,8 +673,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) let archiveDriver = newSqliteArchiveDriver() .put(pubsubTopic, archiveMessages[0 ..< 6]) @@ -927,7 +922,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let ephemeralServerKey = generateSecp256k1Key() ephemeralServer = - newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountEphemeralArchiveResult = ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver) assert mountEphemeralArchiveResult.isOk() @@ -970,7 +965,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let mixedServerKey = generateSecp256k1Key() mixedServer = - newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver) assert mountMixedArchiveResult.isOk() @@ -997,7 +992,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let emptyServerKey = generateSecp256k1Key() emptyServer = - newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver) assert mountEmptyArchiveResult.isOk() @@ -1028,7 +1023,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let voluminousServerKey = generateSecp256k1Key() voluminousServer = - newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountVoluminousArchiveResult = voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages) assert mountVoluminousArchiveResult.isOk() diff --git 
a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 8d48c8cb7..ee68710d1 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -2,7 +2,6 @@ import std/[options, tempfiles], - stew/shims/net as stewNet, testutils/unittests, chronos, std/strformat, @@ -40,8 +39,8 @@ suite "Waku Lightpush - End To End": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) await allFutures(server.start(), client.start()) await server.start() @@ -63,7 +62,7 @@ suite "Waku Lightpush - End To End": asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node": # Given a light lightpush client let lightpushClient = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) lightpushClient.mountLightpushClient() # When the client publishes a message @@ -77,7 +76,7 @@ suite "Waku Lightpush - End To End": # Then the message is not relayed but not due to RLN assert publishResponse.isErr(), "We expect an error response" - assert (publishResponse.error.code == NO_PEERS_TO_RELAY), + assert (publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY), "incorrect error response" suite "Waku LightPush Validation Tests": @@ -94,7 +93,7 @@ suite "Waku Lightpush - End To End": check: publishResponse.isErr() - publishResponse.error.code == INVALID_MESSAGE_ERROR + publishResponse.error.code == LightPushErrorCode.INVALID_MESSAGE publishResponse.error.desc == some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes") @@ -123,8 +122,8 @@ suite "RLN Proofs as a Lightpush Service": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) # mount rln-relay let wakuRlnConfig = WakuRlnConfig( @@ -156,7 +155,7 @@ suite "RLN Proofs as a Lightpush Service": asyncTest "Message is published when RLN enabled": # Given a light lightpush client let lightpushClient = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) lightpushClient.mountLightPushClient() # When the client publishes a message @@ -169,7 +168,7 @@ suite "RLN Proofs as a Lightpush Service": # Then the message is not relayed but not due to RLN assert publishResponse.isErr(), "We expect an error response" - check publishResponse.error.code == NO_PEERS_TO_RELAY + check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY suite "Waku Lightpush message delivery": asyncTest "lightpush message flow succeed": @@ -184,9 +183,9 @@ suite "Waku Lightpush message delivery": await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) - (await destNode.mountRelay(@[DefaultRelayShard])).isOkOr: + (await destNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" - (await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr: + 
(await bridgeNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" await bridgeNode.mountLightPush() lightNode.mountLightPushClient() @@ -210,7 +209,7 @@ suite "Waku Lightpush message delivery": msg == message completionFutRelay.complete(true) - destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr: + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to relay" # Wait for subscription to take effect diff --git a/tests/node/test_wakunode_peer_exchange.nim b/tests/node/test_wakunode_peer_exchange.nim index 26837869d..3996be0dc 100644 --- a/tests/node/test_wakunode_peer_exchange.nim +++ b/tests/node/test_wakunode_peer_exchange.nim @@ -5,7 +5,6 @@ import testutils/unittests, chronos, chronicles, - stew/shims/net, libp2p/switch, libp2p/peerId, libp2p/crypto/crypto, diff --git a/tests/node/test_wakunode_peer_manager.nim b/tests/node/test_wakunode_peer_manager.nim index 88fcc827f..0ef2b1a13 100644 --- a/tests/node/test_wakunode_peer_manager.nim +++ b/tests/node/test_wakunode_peer_manager.nim @@ -3,7 +3,6 @@ import os, std/[options, tables], - stew/shims/net as stewNet, testutils/unittests, chronos, # chronos/timer, @@ -32,7 +31,7 @@ const DEFAULT_PROTOCOLS: seq[string] = @["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"] let - listenIp = ValidIpAddress.init("0.0.0.0") + listenIp = parseIpAddress("0.0.0.0") listenPort = Port(0) suite "Peer Manager": diff --git a/tests/node/test_wakunode_relay_rln.nim b/tests/node/test_wakunode_relay_rln.nim index afc282d50..66866c4da 100644 --- a/tests/node/test_wakunode_relay_rln.nim +++ b/tests/node/test_wakunode_relay_rln.nim @@ -2,7 +2,6 @@ import std/[tempfiles, strutils, options], - stew/shims/net as stewNet, stew/results, testutils/unittests, chronos, @@ -121,8 +120,8 @@ suite "Waku RlnRelay - End to End - Static": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) await allFutures(server.start(), client.start()) @@ -410,8 +409,8 @@ suite "Waku RlnRelay - End to End - OnChain": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) await allFutures(server.start(), client.start()) diff --git a/tests/node/test_wakunode_sharding.nim b/tests/node/test_wakunode_sharding.nim index bdd6859b9..5b99689be 100644 --- a/tests/node/test_wakunode_sharding.nim +++ b/tests/node/test_wakunode_sharding.nim @@ -1,16 +1,10 @@ {.used.} -import - std/[options, sequtils, tempfiles], - testutils/unittests, - chronos, - chronicles, - stew/shims/net as stewNet +import std/[options, sequtils, tempfiles], testutils/unittests, chronos, chronicles import std/[sequtils, tempfiles], stew/byteutils, - stew/shims/net as stewNet, testutils/unittests, chronos, libp2p/switch, @@ -35,7 +29,7 @@ import import waku_relay/protocol const - listenIp = ValidIpAddress.init("0.0.0.0") + listenIp = parseIpAddress("0.0.0.0") 
listenPort = Port(0) suite "Sharding": diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim index 622322d92..00dbfb7ee 100644 --- a/tests/node/test_wakunode_store.nim +++ b/tests/node/test_wakunode_store.nim @@ -1,11 +1,6 @@ {.used.} -import - std/[options, sequtils, sets], - stew/shims/net as stewNet, - testutils/unittests, - chronos, - libp2p/crypto/crypto +import std/[options, sequtils, sets], testutils/unittests, chronos, libp2p/crypto/crypto import waku/[ @@ -75,8 +70,8 @@ suite "Waku Store - End to End - Sorted Archive": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) archiveDriver = newArchiveDriverWithMessages(pubsubTopic, messages) let mountArchiveResult = server.mountArchive(archiveDriver) @@ -480,7 +475,7 @@ suite "Waku Store - End to End - Sorted Archive": ) otherServerKey = generateSecp256k1Key() otherServer = - newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountOtherArchiveResult = otherServer.mountArchive(otherArchiveDriverWithMessages) assert mountOtherArchiveResult.isOk() @@ -571,8 +566,8 @@ suite "Waku Store - End to End - Unsorted Archive": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) let unsortedArchiveDriverWithMessages = @@ -788,8 +783,8 @@ suite "Waku Store - End to End - Unsorted Archive without provided Timestamp": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) let unsortedArchiveDriverWithMessages = @@ -938,8 +933,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics": serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() - server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put( pubsubTopicB, messages[6 ..< 10] @@ -1189,7 +1184,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let ephemeralServerKey = generateSecp256k1Key() ephemeralServer = - newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountEphemeralArchiveResult = ephemeralServer.mountArchive(ephemeralArchiveDriver) assert mountEphemeralArchiveResult.isOk() @@ -1231,7 +1226,7 @@ suite "Waku Store - End to End - Archive with Multiple 
Topics": let mixedServerKey = generateSecp256k1Key() mixedServer = - newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver) assert mountMixedArchiveResult.isOk() @@ -1258,7 +1253,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let emptyServerKey = generateSecp256k1Key() emptyServer = - newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver) assert mountEmptyArchiveResult.isOk() @@ -1298,7 +1293,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let voluminousServerKey = generateSecp256k1Key() voluminousServer = - newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0)) mountVoluminousArchiveResult = voluminousServer.mountArchive(voluminousArchiveDriverWithMessages) assert mountVoluminousArchiveResult.isOk() diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index c38ca23b8..56ea17fc1 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -2,7 +2,6 @@ import std/[sequtils, times, sugar, net], - stew/shims/net as stewNet, testutils/unittests, chronos, json_rpc/rpcserver, @@ -40,7 +39,7 @@ procSuite "Peer Manager": asyncTest "connectPeer() works": # Create 2 nodes let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) await allFutures(nodes.mapIt(it.start())) @@ -59,7 +58,7 @@ procSuite "Peer Manager": asyncTest "dialPeer() works": # Create 2 nodes let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) await allFutures(nodes.mapIt(it.start())) @@ -94,7 +93,7 @@ procSuite "Peer Manager": asyncTest "dialPeer() fails gracefully": # Create 2 nodes and start them let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) @@ -122,8 +121,7 @@ procSuite "Peer Manager": asyncTest "Adding, selecting and filtering peers work": let - node = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) # Create filter peer filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet() @@ -156,7 +154,7 @@ procSuite "Peer Manager": asyncTest "Peer manager keeps track of connections": # Create 2 nodes let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) await allFutures(nodes.mapIt(it.start())) @@ -209,7 +207,7 @@ procSuite "Peer Manager": asyncTest "Peer manager updates failed peers correctly": # Create 2 nodes let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), 
parseIpAddress("0.0.0.0"), Port(0)) ) await allFutures(nodes.mapIt(it.start())) @@ -311,7 +309,7 @@ procSuite "Peer Manager": # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("127.0.0.1"), + parseIpAddress("127.0.0.1"), Port(56037), peerStorage = storage, ) @@ -384,7 +382,7 @@ procSuite "Peer Manager": # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("127.0.0.1"), + parseIpAddress("127.0.0.1"), Port(56037), peerStorage = storage, ) @@ -420,26 +418,26 @@ procSuite "Peer Manager": # different network node1 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("0.0.0.0"), + parseIpAddress("0.0.0.0"), port, clusterId = 3, - shards = @[uint16(0)], + subscribeShards = @[uint16(0)], ) # same network node2 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("0.0.0.0"), + parseIpAddress("0.0.0.0"), port, clusterId = 4, - shards = @[uint16(0)], + subscribeShards = @[uint16(0)], ) node3 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("0.0.0.0"), + parseIpAddress("0.0.0.0"), port, clusterId = 4, - shards = @[uint16(0)], + subscribeShards = @[uint16(0)], ) node1.mountMetadata(3).expect("Mounted Waku Metadata") @@ -476,12 +474,12 @@ procSuite "Peer Manager": storage = WakuPeerStorage.new(database)[] node1 = newTestWakuNode( generateSecp256k1Key(), - ValidIpAddress.init("0.0.0.0"), + parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage, ) node2 = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) peerInfo2 = node2.switch.peerInfo betaCodec = "/vac/waku/relay/2.0.0-beta2" stableCodec = "/vac/waku/relay/2.0.0" @@ -509,10 +507,7 @@ procSuite "Peer Manager": # Simulate restart by initialising a new node using the same storage let node3 = newTestWakuNode( - generateSecp256k1Key(), - ValidIpAddress.init("0.0.0.0"), - Port(0), - peerStorage = storage, + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage ) (await node3.mountRelay()).isOkOr: @@ -547,7 +542,7 @@ procSuite "Peer Manager": let nodes = toSeq(0 ..< 4).mapIt( newTestWakuNode( nodeKey = generateSecp256k1Key(), - bindIp = ValidIpAddress.init("0.0.0.0"), + bindIp = parseIpAddress("0.0.0.0"), bindPort = Port(0), wakuFlags = some(CapabilitiesBitfield.init(@[Relay])), ) @@ -617,7 +612,7 @@ procSuite "Peer Manager": let nodes = toSeq(0 ..< 4).mapIt( newTestWakuNode( nodeKey = generateSecp256k1Key(), - bindIp = ValidIpAddress.init("0.0.0.0"), + bindIp = parseIpAddress("0.0.0.0"), bindPort = Port(0), wakuFlags = some(CapabilitiesBitfield.init(@[Relay])), ) @@ -685,7 +680,7 @@ procSuite "Peer Manager": asyncTest "Peer store keeps track of incoming connections": # Create 4 nodes let nodes = toSeq(0 ..< 4).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) # Start them @@ -779,8 +774,7 @@ procSuite "Peer Manager": let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D" let - node = - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) peers = toSeq(1 .. 
4) .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)) .filterIt(it.isOk()) @@ -819,7 +813,7 @@ procSuite "Peer Manager": asyncTest "connectedPeers() returns expected number of connections per protocol": # Create 4 nodes let nodes = toSeq(0 ..< 4).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) # Start them with relay + filter @@ -874,7 +868,7 @@ procSuite "Peer Manager": asyncTest "getNumStreams() returns expected number of connections per protocol": # Create 2 nodes let nodes = toSeq(0 ..< 2).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) # Start them with relay + filter @@ -1159,7 +1153,7 @@ procSuite "Peer Manager": asyncTest "colocationLimit is enforced by pruneConnsByIp()": # Create 5 nodes let nodes = toSeq(0 ..< 5).mapIt( - newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0)) + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) ) # Start them with relay + filter diff --git a/tests/test_relay_peer_exchange.nim b/tests/test_relay_peer_exchange.nim index a729ff1a7..84976bd9a 100644 --- a/tests/test_relay_peer_exchange.nim +++ b/tests/test_relay_peer_exchange.nim @@ -2,7 +2,6 @@ import std/[sequtils, options], - stew/shims/net, testutils/unittests, chronos, libp2p/peerid, @@ -23,9 +22,9 @@ procSuite "Relay (GossipSub) Peer Exchange": newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true) # When both client and server mount relay without a handler - (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" - (await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr: + (await node2.mountRelay(none(RoutingRecordsHandler))).isOkOr: assert false, "Failed to mount relay" # Then the relays are mounted without a handler @@ -75,11 +74,11 @@ procSuite "Relay (GossipSub) Peer Exchange": peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler # Givem the nodes mount relay with a peer exchange handler - (await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr: + (await node1.mountRelay(some(emptyPeerExchangeHandle))).isOkOr: assert false, "Failed to mount relay" - (await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr: + (await node2.mountRelay(some(emptyPeerExchangeHandle))).isOkOr: assert false, "Failed to mount relay" - (await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr: + (await node3.mountRelay(some(peerExchangeHandle))).isOkOr: assert false, "Failed to mount relay" # Ensure that node1 prunes all peers after the first connection @@ -87,6 +86,19 @@ procSuite "Relay (GossipSub) Peer Exchange": await allFutures([node1.start(), node2.start(), node3.start()]) + # The three nodes should be subscribed to the same shard + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node3.subscribe((kind: PubsubSub, topic: 
$DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + # When nodes are connected await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) await node3.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) diff --git a/tests/test_waku_dnsdisc.nim b/tests/test_waku_dnsdisc.nim index 7028b20eb..758bdb3ca 100644 --- a/tests/test_waku_dnsdisc.nim +++ b/tests/test_waku_dnsdisc.nim @@ -2,7 +2,6 @@ import std/[sequtils, tables], - stew/shims/net, results, stew/base32, testutils/unittests, diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim index d4d05ad97..f6a9e631b 100644 --- a/tests/test_waku_keepalive.nim +++ b/tests/test_waku_keepalive.nim @@ -1,7 +1,6 @@ {.used.} import - stew/shims/net as stewNet, testutils/unittests, chronos, chronicles, @@ -45,7 +44,10 @@ suite "Waku Keepalive": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) - node1.startKeepalive(2.seconds) + let healthMonitor = NodeHealthMonitor() + healthMonitor.setNodeToHealthMonitor(node1) + healthMonitor.startKeepalive(2.seconds).isOkOr: + assert false, "Failed to start keepalive" check: (await completionFut.withTimeout(5.seconds)) == true diff --git a/tests/test_waku_metadata.nim b/tests/test_waku_metadata.nim index aa22a43f4..b30fd1712 100644 --- a/tests/test_waku_metadata.nim +++ b/tests/test_waku_metadata.nim @@ -5,7 +5,6 @@ import testutils/unittests, chronos, chronicles, - stew/shims/net, libp2p/switch, libp2p/peerId, libp2p/crypto/crypto, diff --git a/tests/test_waku_netconfig.nim b/tests/test_waku_netconfig.nim index d2c9cc780..712fa4736 100644 --- a/tests/test_waku_netconfig.nim +++ b/tests/test_waku_netconfig.nim @@ -18,8 +18,8 @@ suite "Waku NetConfig": let wakuFlags = defaultTestWakuFlags() let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extIp = none(IpAddress), extPort = none(Port), extMultiAddrs = @[], @@ -46,7 +46,8 @@ suite "Waku NetConfig": let conf = defaultTestWakuConf() let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, ) assert netConfigRes.isOk(), $netConfigRes.error @@ -57,7 +58,9 @@ suite "Waku NetConfig": netConfig.announcedAddresses.len == 1 # Only bind address should be present netConfig.announcedAddresses[0] == formatListenAddress( - ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort) + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.endpointConf.p2pTcpPort + ) ) asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided": @@ -67,8 +70,8 @@ suite "Waku NetConfig": extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), ) @@ -88,8 +91,8 @@ suite "Waku NetConfig": extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), ) @@ -110,8 +113,8 @@ suite "Waku 
NetConfig": extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)] let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, ) @@ -131,8 +134,8 @@ suite "Waku NetConfig": extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extIp = some(extIp), extPort = some(extPort), @@ -152,8 +155,8 @@ suite "Waku NetConfig": wssEnabled = false var netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, wsEnabled = true, wssEnabled = wssEnabled, ) @@ -165,8 +168,9 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress netConfig.announcedAddresses[1] == ( - ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) & - wsFlag(wssEnabled) + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.webSocketConf.get().port + ) & wsFlag(wssEnabled) ) ## Now try the same for the case of wssEnabled = true @@ -174,8 +178,8 @@ suite "Waku NetConfig": wssEnabled = true netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, wsEnabled = true, wssEnabled = wssEnabled, ) @@ -187,8 +191,9 @@ suite "Waku NetConfig": check: netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress netConfig.announcedAddresses[1] == ( - ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) & - wsFlag(wssEnabled) + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.websocketConf.get().port + ) & wsFlag(wssEnabled) ) asyncTest "Announced WebSocket address contains external IP if provided": @@ -199,8 +204,8 @@ suite "Waku NetConfig": wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), wsEnabled = true, @@ -224,8 +229,8 @@ suite "Waku NetConfig": wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), wsEnabled = true, @@ -252,8 +257,8 @@ suite "Waku NetConfig": wssEnabled = false let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extIp = some(extIp), extPort = some(extPort), @@ -277,7 +282,8 @@ suite "Waku NetConfig": let conf = defaultTestWakuConf() let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, ) assert 
netConfigRes.isOk(), $netConfigRes.error @@ -285,8 +291,8 @@ suite "Waku NetConfig": let netConfig = netConfigRes.get() check: - netConfig.enrIp.get() == conf.networkConf.p2pListenAddress - netConfig.enrPort.get() == conf.networkConf.p2pTcpPort + netConfig.enrIp.get() == conf.endpointConf.p2pListenAddress + netConfig.enrPort.get() == conf.endpointConf.p2pTcpPort asyncTest "ENR is set with extIp/Port if provided": let @@ -295,8 +301,8 @@ suite "Waku NetConfig": extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extIp = some(extIp), extPort = some(extPort), ) @@ -316,8 +322,8 @@ suite "Waku NetConfig": extPort = Port(1234) let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, dns4DomainName = some(dns4DomainName), extPort = some(extPort), ) @@ -339,8 +345,8 @@ suite "Waku NetConfig": extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] var netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, wsEnabled = wsEnabled, ) @@ -358,8 +364,8 @@ suite "Waku NetConfig": extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, wssEnabled = wssEnabled, ) @@ -380,8 +386,8 @@ suite "Waku NetConfig": extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)] let netConfigRes = NetConfig.init( - bindIp = conf.networkConf.p2pListenAddress, - bindPort = conf.networkConf.p2pTcpPort, + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, extMultiAddrs = extMultiAddrs, extMultiAddrsOnly = true, ) diff --git a/tests/test_waku_switch.nim b/tests/test_waku_switch.nim index e58bff12e..3e6fd08eb 100644 --- a/tests/test_waku_switch.nim +++ b/tests/test_waku_switch.nim @@ -75,8 +75,15 @@ suite "Waku Switch": completionFut = newFuture[bool]() proto = new LPProtocol proto.codec = customProtoCodec - proto.handler = proc(conn: Connection, proto: string) {.async.} = - assert (await conn.readLp(1024)) == msg.toBytes() + proto.handler = proc( + conn: Connection, proto: string + ) {.async: (raises: [CancelledError]).} = + try: + assert (await conn.readLp(1024)) == msg.toBytes() + except LPStreamError: + error "Connection read error", error = getCurrentExceptionMsg() + assert false, getCurrentExceptionMsg() + completionFut.complete(true) await proto.start() diff --git a/tests/test_wakunode.nim b/tests/test_wakunode.nim index 51dd999b0..a7f1084fb 100644 --- a/tests/test_wakunode.nim +++ b/tests/test_wakunode.nim @@ -3,7 +3,6 @@ import std/[sequtils, strutils, net], stew/byteutils, - stew/shims/net as stewNet, testutils/unittests, chronicles, chronos, @@ -15,7 +14,8 @@ import libp2p/protocols/pubsub/pubsub, libp2p/protocols/pubsub/gossipsub, libp2p/nameresolving/mockresolver, - eth/p2p/discoveryv5/enr + eth/p2p/discoveryv5/enr, + eth/net/utils import waku/[waku_core, waku_node, node/peer_manager], 
./testlib/wakucore, ./testlib/wakunode @@ -34,14 +34,14 @@ suite "WakuNode": # Setup node 1 with stable codec "/vac/waku/relay/2.0.0" await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" node1.wakuRelay.codec = "/vac/waku/relay/2.0.0" # Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2" @@ -69,7 +69,7 @@ suite "WakuNode": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node2.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic" await sleepAsync(2000.millis) diff --git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim index 87fdbcf5f..fe040534e 100644 --- a/tests/testlib/wakunode.nim +++ b/tests/testlib/wakunode.nim @@ -1,7 +1,6 @@ import std/options, results, - stew/shims/net, chronos, libp2p/switch, libp2p/builders, @@ -38,7 +37,7 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder = builder.withRelayServiceRatio("60:40") builder.withMaxMessageSize("1024 KiB") builder.withClusterId(DefaultClusterId) - builder.withShards(@[DefaultShardId]) + builder.withSubscribeShards(@[DefaultShardId]) builder.withRelay(true) builder.withRendezvous(true) builder.storeServiceConf.withDbMigration(false) @@ -73,7 +72,7 @@ proc newTestWakuNode*( agentString = none(string), peerStoreCapacity = none(int), clusterId = DefaultClusterId, - shards = @[DefaultShardId], + subscribeShards = @[DefaultShardId], ): WakuNode = var resolvedExtIp = extIp @@ -87,7 +86,7 @@ proc newTestWakuNode*( var conf = defaultTestWakuConf() conf.clusterId = clusterId - conf.shards = shards + conf.subscribeShards = subscribeShards if dns4DomainName.isSome() and extIp.isNone(): # If there's an error resolving the IP, an exception is thrown and test fails @@ -115,7 +114,7 @@ proc newTestWakuNode*( var enrBuilder = EnrBuilder.init(nodeKey) enrBuilder.withWakuRelaySharding( - RelayShards(clusterId: conf.clusterId, shardIds: conf.shards) + RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards) ).isOkOr: raise newException(Defect, "Invalid record: " & $error) diff --git a/tests/waku_core/test_published_address.nim b/tests/waku_core/test_published_address.nim index 37f263ea0..9d6201a77 100644 --- a/tests/waku_core/test_published_address.nim +++ b/tests/waku_core/test_published_address.nim @@ -1,6 +1,6 @@ {.used.} -import stew/shims/net as stewNet, std/strutils, testutils/unittests +import std/[strutils, net], testutils/unittests import ../testlib/wakucore, ../testlib/wakunode suite "Waku Core - Published Address": diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index c5dd1c55e..79913ce92 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -503,7 +503,7 @@ suite "Waku Discovery v5": waku.dynamicBootstrapNodes, waku.rng, waku.conf.nodeKey, - waku.conf.networkConf.p2pListenAddress, + waku.conf.endpointConf.p2pListenAddress, waku.conf.portsShift, ) @@ -534,7 +534,7 @@ suite "Waku Discovery v5": waku.dynamicBootstrapNodes, waku.rng, waku.conf.nodeKey, - waku.conf.networkConf.p2pListenAddress, + 
waku.conf.endpointConf.p2pListenAddress, waku.conf.portsShift, ) diff --git a/tests/waku_discv5/utils.nim b/tests/waku_discv5/utils.nim index 422e13fd9..5a69108c5 100644 --- a/tests/waku_discv5/utils.nim +++ b/tests/waku_discv5/utils.nim @@ -1,9 +1,4 @@ -import - std/options, - stew/shims/net, - chronos, - libp2p/crypto/crypto as libp2p_keys, - eth/keys as eth_keys +import std/options, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys import waku/ diff --git a/tests/waku_enr/test_sharding.nim b/tests/waku_enr/test_sharding.nim index 7c65d83fb..0984b7d8d 100644 --- a/tests/waku_enr/test_sharding.nim +++ b/tests/waku_enr/test_sharding.nim @@ -2,7 +2,6 @@ import stew/results, - stew/shims/net, chronos, testutils/unittests, libp2p/crypto/crypto as libp2p_keys, diff --git a/tests/waku_enr/utils.nim b/tests/waku_enr/utils.nim index 8f79b1d8f..7302c2112 100644 --- a/tests/waku_enr/utils.nim +++ b/tests/waku_enr/utils.nim @@ -2,7 +2,6 @@ import std/options, sequtils, results, - stew/shims/net, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys diff --git a/tests/waku_lightpush/lightpush_utils.nim b/tests/waku_lightpush/lightpush_utils.nim index 0dde4140d..160e5d060 100644 --- a/tests/waku_lightpush/lightpush_utils.nim +++ b/tests/waku_lightpush/lightpush_utils.nim @@ -20,8 +20,10 @@ proc newTestWakuLightpushNode*( ): Future[WakuLightPush] {.async.} = let peerManager = PeerManager.new(switch) - wakuSharding = Sharding(clusterId: 1, shardCountGenZero: 8) - proto = WakuLightPush.new(peerManager, rng, handler, wakuSharding, rateLimitSetting) + wakuAutoSharding = Sharding(clusterId: 1, shardCountGenZero: 8) + proto = WakuLightPush.new( + peerManager, rng, handler, some(wakuAutoSharding), rateLimitSetting + ) await proto.start() switch.mount(proto) diff --git a/tests/waku_lightpush/test_all.nim b/tests/waku_lightpush/test_all.nim index 4e4980929..b5edd72fb 100644 --- a/tests/waku_lightpush/test_all.nim +++ b/tests/waku_lightpush/test_all.nim @@ -1 +1,3 @@ +{.used.} + import ./test_client, ./test_ratelimit diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim index 500d228ce..10098109a 100644 --- a/tests/waku_lightpush/test_client.nim +++ b/tests/waku_lightpush/test_client.nim @@ -53,8 +53,9 @@ suite "Waku Lightpush Client": ): Future[WakuLightPushResult] {.async.} = let msgLen = message.encode().buffer.len if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: - return - lighpushErrorResult(PAYLOAD_TOO_LARGE, "length greater than maxMessageSize") + return lighpushErrorResult( + LightPushErrorCode.PAYLOAD_TOO_LARGE, "length greater than maxMessageSize" + ) handlerFuture.complete((pubsubTopic, message)) # return that we published the message to 1 peer. 
return ok(1) @@ -294,7 +295,7 @@ suite "Waku Lightpush Client": # Then the message is not received by the server check: publishResponse5.isErr() - publishResponse5.error.code == PAYLOAD_TOO_LARGE + publishResponse5.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE (await handlerFuture.waitForResult()).isErr() asyncTest "Invalid Encoding Payload": @@ -307,7 +308,7 @@ suite "Waku Lightpush Client": # And the error is returned check: publishResponse.requestId == "N/A" - publishResponse.statusCode == LightpushStatusCode.BAD_REQUEST.uint32 + publishResponse.statusCode == LightPushErrorCode.BAD_REQUEST publishResponse.statusDesc.isSome() scanf(publishResponse.statusDesc.get(), decodeRpcFailure) @@ -320,7 +321,7 @@ suite "Waku Lightpush Client": peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = handlerFuture2.complete() - return lighpushErrorResult(PAYLOAD_TOO_LARGE, handlerError) + return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError) let serverSwitch2 = newTestSwitch() @@ -336,7 +337,7 @@ suite "Waku Lightpush Client": # Then the response is negative check: - publishResponse.error.code == PAYLOAD_TOO_LARGE + publishResponse.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE publishResponse.error.desc == some(handlerError) (await handlerFuture2.waitForResult()).isOk() diff --git a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim index 0dd7913d1..b2dcdc7b5 100644 --- a/tests/waku_lightpush/test_ratelimit.nim +++ b/tests/waku_lightpush/test_ratelimit.nim @@ -119,7 +119,7 @@ suite "Rate limited push service": check: requestRes.isErr() - requestRes.error.code == TOO_MANY_REQUESTS + requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS requestRes.error.desc == some(TooManyRequestsMessage) for testCnt in 0 .. 
2: diff --git a/tests/waku_lightpush_legacy/test_all.nim b/tests/waku_lightpush_legacy/test_all.nim index 4e4980929..b5edd72fb 100644 --- a/tests/waku_lightpush_legacy/test_all.nim +++ b/tests/waku_lightpush_legacy/test_all.nim @@ -1 +1,3 @@ +{.used.} + import ./test_client, ./test_ratelimit diff --git a/tests/waku_peer_exchange/test_all.nim b/tests/waku_peer_exchange/test_all.nim index 069de6e7b..903b47cb9 100644 --- a/tests/waku_peer_exchange/test_all.nim +++ b/tests/waku_peer_exchange/test_all.nim @@ -1 +1,3 @@ +{.used.} + import ./test_protocol, ./test_rpc_codec diff --git a/tests/waku_relay/test_message_id.nim b/tests/waku_relay/test_message_id.nim index 633303120..6dcd72ab7 100644 --- a/tests/waku_relay/test_message_id.nim +++ b/tests/waku_relay/test_message_id.nim @@ -1,7 +1,7 @@ import unittest, results, - stew/[shims/net, byteutils], + stew/byteutils, nimcrypto/sha2, libp2p/protocols/pubsub/rpc/messages diff --git a/tests/waku_relay/test_protocol.nim b/tests/waku_relay/test_protocol.nim index d0e8a7ed6..46032b693 100644 --- a/tests/waku_relay/test_protocol.nim +++ b/tests/waku_relay/test_protocol.nim @@ -2,7 +2,6 @@ import std/[options, strformat], - stew/shims/net as stewNet, testutils/unittests, chronos, libp2p/protocols/pubsub/[pubsub, gossipsub], @@ -78,7 +77,8 @@ suite "Waku Relay": asyncTest "Publish with Subscription (Network Size: 1)": # When subscribing to a Pubsub Topic - discard node.subscribe(pubsubTopic, simpleFutureHandler) + + node.subscribe(pubsubTopic, simpleFutureHandler) # Then the node is subscribed check: @@ -112,7 +112,7 @@ suite "Waku Relay": otherHandlerFuture.complete((topic, message)) # When subscribing the second node to the Pubsub Topic - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) # Then the second node is subscribed, but not the first one check: @@ -173,8 +173,8 @@ suite "Waku Relay": otherHandlerFuture.complete((topic, message)) # When subscribing both nodes to the same Pubsub Topic - discard node.subscribe(pubsubTopic, simpleFutureHandler) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) # Then both nodes are subscribed check: @@ -229,7 +229,7 @@ suite "Waku Relay": asyncTest "Refreshing subscription": # Given a subscribed node - discard node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check: node.isSubscribed(pubsubTopic) node.subscribedTopics == pubsubTopicSeq @@ -245,7 +245,7 @@ suite "Waku Relay": ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard node.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, otherSimpleFutureHandler) check: node.isSubscribed(pubsubTopic) node.subscribedTopics == pubsubTopicSeq @@ -292,14 +292,14 @@ suite "Waku Relay": otherHandlerFuture.complete((topic, message)) otherNode.addValidator(len4Validator) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) await sleepAsync(500.millis) check: otherNode.isSubscribed(pubsubTopic) # Given a subscribed node with a validator node.addValidator(len4Validator) - discard node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) await sleepAsync(500.millis) check: node.isSubscribed(pubsubTopic) @@ -381,8 +381,8 @@ suite "Waku Relay": ) {.async, 
gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard node.subscribe(pubsubTopic, simpleFutureHandler) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) check: node.isSubscribed(pubsubTopic) node.subscribedTopics == pubsubTopicSeq @@ -465,8 +465,8 @@ suite "Waku Relay": ) {.async, gcsafe.} = handlerFuture2.complete((topic, message)) - discard node.subscribe(pubsubTopic, simpleFutureHandler) - discard node.subscribe(pubsubTopicB, simpleFutureHandler2) + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler2) # Given the other nodes are subscribed to two pubsub topics var otherHandlerFuture1 = newPushHandlerFuture() @@ -493,10 +493,10 @@ suite "Waku Relay": ) {.async, gcsafe.} = anotherHandlerFuture2.complete((topic, message)) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1) - discard otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2) - discard anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1) - discard anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1) + otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2) + anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1) + anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2) await sleepAsync(500.millis) # When publishing a message in node for each of the pubsub topics @@ -736,15 +736,13 @@ suite "Waku Relay": otherSwitch = newTestSwitch() otherNode = await newTestWakuRelay(otherSwitch) await allFutures(otherSwitch.start(), otherNode.start()) - let otherTopicHandler: TopicHandler = - otherNode.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, simpleFutureHandler) # Given a node without a subscription check: node.subscribedTopics == [] - # When unsubscribing from a pubsub topic from an unsubscribed topic handler - node.unsubscribe(pubsubTopic, otherTopicHandler) + node.unsubscribe(pubsubTopic) # Then the node is still not subscribed check: @@ -755,11 +753,11 @@ suite "Waku Relay": asyncTest "Single Node with Single Pubsub Topic": # Given a node subscribed to a pubsub topic - let topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check node.subscribedTopics == pubsubTopicSeq # When unsubscribing from the pubsub topic - node.unsubscribe(pubsubTopic, topicHandler) + node.unsubscribe(pubsubTopic) # Then the node is not subscribed anymore check node.subscribedTopics == [] @@ -769,9 +767,8 @@ suite "Waku Relay": let pubsubTopicB = "/waku/2/rs/0/1" # Given a node subscribed to multiple pubsub topics - let - topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler) - topicHandlerB = node.subscribe(pubsubTopicB, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler) assert pubsubTopic in node.subscribedTopics, fmt"Node is not subscribed to {pubsubTopic}" @@ -779,13 +776,13 @@ suite "Waku Relay": fmt"Node is not subscribed to {pubsubTopicB}" # When unsubscribing from one of the pubsub topics - node.unsubscribe(pubsubTopic, topicHandler) + node.unsubscribe(pubsubTopic) # Then the node is still subscribed to the other pubsub topic check node.subscribedTopics == @[pubsubTopicB] # When unsubscribing from the other pubsub topic - node.unsubscribe(pubsubTopicB, 
topicHandlerB) + node.unsubscribe(pubsubTopicB) # Then the node is not subscribed anymore check node.subscribedTopics == [] @@ -803,7 +800,7 @@ suite "Waku Relay": asyncTest "Single Node with Single Pubsub Topic": # Given a node subscribed to a pubsub topic - discard node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check node.subscribedTopics == pubsubTopicSeq # When unsubscribing from all pubsub topics @@ -817,9 +814,9 @@ suite "Waku Relay": let pubsubTopicB = "/waku/2/rs/0/1" # Given a node subscribed to multiple pubsub topics - discard node.subscribe(pubsubTopic, simpleFutureHandler) - discard node.subscribe(pubsubTopic, simpleFutureHandler) - discard node.subscribe(pubsubTopicB, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler) assert pubsubTopic in node.subscribedTopics, fmt"Node is not subscribed to {pubsubTopic}" @@ -856,8 +853,8 @@ suite "Waku Relay": ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) - discard node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq @@ -1022,8 +1019,8 @@ suite "Waku Relay": ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) - discard node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq @@ -1164,8 +1161,8 @@ suite "Waku Relay": otherMessageSeq.add((topic, message)) otherHandlerFuture.complete((topic, message)) - discard node.subscribe(pubsubTopic, thisSimpleFutureHandler) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, thisSimpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq @@ -1238,8 +1235,8 @@ suite "Waku Relay": ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) - discard node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq @@ -1333,8 +1330,8 @@ suite "Waku Relay": ) {.async, gcsafe.} = otherHandlerFuture.complete((topic, message)) - discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) - discard node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) check: node.subscribedTopics == pubsubTopicSeq otherNode.subscribedTopics == pubsubTopicSeq diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim index 5d5ce8458..2b4f32617 100644 --- a/tests/waku_relay/test_wakunode_relay.nim +++ b/tests/waku_relay/test_wakunode_relay.nim @@ -3,7 +3,6 @@ import std/[os, sequtils, sysrand, math], stew/byteutils, - 
stew/shims/net as stewNet, testutils/unittests, chronos, libp2p/switch, @@ -71,15 +70,15 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node3.start() - (await node3.mountRelay(@[shard])).isOkOr: + (await node3.mountRelay()).isOkOr: assert false, "Failed to mount relay" await allFutures( @@ -98,13 +97,19 @@ suite "WakuNode - Relay": msg.timestamp > 0 completionFut.complete(true) - ## The following unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. - node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: - assert false, "Failed to unsubscribe from topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + ## node1 and node2 explicitly subscribe to the same shard as node3 + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -148,15 +153,15 @@ suite "WakuNode - Relay": # start all the nodes await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node3.start() - (await node3.mountRelay(@[shard])).isOkOr: + (await node3.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -194,13 +199,19 @@ suite "WakuNode - Relay": # relay handler is called completionFut.complete(true) - ## The following unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. 
- node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: - assert false, "Failed to unsubscribe from topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + ## node1 and node2 explicitly subscribe to the same shard as node3 + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -288,11 +299,11 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -314,7 +325,7 @@ suite "WakuNode - Relay": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -346,11 +357,11 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -372,7 +383,7 @@ suite "WakuNode - Relay": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -404,11 +415,11 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" #delete websocket peer address @@ -434,7 +445,7 @@ suite "WakuNode - Relay": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ 
-468,11 +479,11 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -494,7 +505,7 @@ suite "WakuNode - Relay": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -536,11 +547,11 @@ suite "WakuNode - Relay": message = WakuMessage(payload: payload, contentTopic: contentTopic) await node1.start() - (await node1.mountRelay(@[shard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node2.start() - (await node2.mountRelay(@[shard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -562,7 +573,7 @@ suite "WakuNode - Relay": assert false, "Failed to unsubscribe from topic: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr: + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) @@ -584,10 +595,15 @@ suite "WakuNode - Relay": await allFutures(nodes.mapIt(it.start())) await allFutures(nodes.mapIt(it.mountRelay())) + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + # subscribe all nodes to a topic let topic = "topic" for node in nodes: - discard node.wakuRelay.subscribe(topic, nil) + node.wakuRelay.subscribe(topic, simpleHandler) await sleepAsync(500.millis) # connect nodes in full mesh @@ -632,7 +648,7 @@ suite "WakuNode - Relay": # Stop all nodes await allFutures(nodes.mapIt(it.stop())) - asyncTest "Only one subscription is allowed for contenttopics that generate the same shard": + asyncTest "Multiple subscription calls are allowed for contenttopics that generate the same shard": ## Setup let nodeKey = generateSecp256k1Key() @@ -641,7 +657,7 @@ suite "WakuNode - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" - require node.mountSharding(1, 1).isOk + require node.mountAutoSharding(1, 1).isOk ## Given let @@ -654,27 +670,35 @@ suite "WakuNode - Relay": ): Future[void] {.gcsafe, raises: [Defect].} = discard pubsubTopic discard message - assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"), + assert shard == + node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic"), "topic must use the same shard" - assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"), + assert shard == + node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic"), "topic must use the same shard" - assert shard == node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"), + assert shard == + 
node.wakuAutoSharding.get().getShard(contentTopicC).expect("Valid Topic"), "topic must use the same shard" ## When - node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr: + node.subscribe((kind: ContentSub, topic: contentTopicA), handler).isOkOr: assert false, "Failed to subscribe to topic: " & $error - node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isErrOr: + node.subscribe((kind: ContentSub, topic: contentTopicB), handler).isOkOr: assert false, - "The subscription should fail because is already subscribe to that shard" - node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isErrOr: + "The subscription call shouldn't error even though it's already subscribed to that shard" + node.subscribe((kind: ContentSub, topic: contentTopicC), handler).isOkOr: assert false, - "The subscription should fail because is already subscribe to that shard" + "The subscription call shouldn't error even though it's already subscribed to that shard" + + ## The node should be subscribed to the shard + check node.wakuRelay.isSubscribed(shard) ## Then node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr: assert false, "Failed to unsubscribe to topic: " & $error - check node.wakuRelay.isSubscribed(shard) + + ## After unsubcription, the node should not be subscribed to the shard anymore + check not node.wakuRelay.isSubscribed(shard) ## Cleanup await node.stop() diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim index 6de28583e..81e366298 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -3,7 +3,6 @@ import std/[strutils, sequtils, tempfiles], stew/byteutils, - stew/shims/net as stewNet, chronos, chronicles, libp2p/switch, @@ -61,7 +60,7 @@ proc subscribeToContentTopicWithHandler*( if topic == topic: completionFut.complete(true) - (node.subscribe((kind: ContentSub, topic: contentTopic), some(relayHandler))).isOkOr: + (node.subscribe((kind: ContentSub, topic: contentTopic), relayHandler)).isOkOr: error "Failed to subscribe to content topic", error completionFut.complete(true) return completionFut @@ -74,7 +73,7 @@ proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bo if topic == pubsubTopic: completionFut.complete(true) - (node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler))).isOkOr: + (node.subscribe((kind: PubsubSub, topic: pubsubTopic), relayHandler)).isOkOr: error "Failed to subscribe to pubsub topic", error completionFut.complete(false) return completionFut diff --git a/tests/waku_rln_relay/rln/test_wrappers.nim b/tests/waku_rln_relay/rln/test_wrappers.nim index 26e18f9da..f19599e4f 100644 --- a/tests/waku_rln_relay/rln/test_wrappers.nim +++ b/tests/waku_rln_relay/rln/test_wrappers.nim @@ -3,7 +3,6 @@ import testutils/unittests, chronicles, chronos, - stew/shims/net as stewNet, eth/keys, bearssl, stew/[results], diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index 7ba64e39b..3de48a738 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -3,7 +3,7 @@ {.push raises: [].} import - std/[options, sequtils, deques, random], + std/[options, sequtils, deques, random, locks], results, stew/byteutils, testutils/unittests, @@ -28,116 +28,121 @@ import ../testlib/wakucore, ./utils_onchain +var testLock: Lock +initLock(testLock) + suite "Onchain group manager": - # We run Anvil - let runAnvil 
{.used.} = runAnvil() + setup: + # Acquire lock to ensure tests run sequentially + acquire(testLock) - var manager {.threadvar.}: OnchainGroupManager + let runAnvil {.used.} = runAnvil() - asyncSetup: - manager = await setupOnchainGroupManager() + var manager {.threadvar.}: OnchainGroupManager + manager = waitFor setupOnchainGroupManager() - asyncTeardown: - await manager.stop() + teardown: + waitFor manager.stop() + stopAnvil(runAnvil) + # Release lock after test completes + release(testLock) - asyncTest "should initialize successfully": - (await manager.init()).isOkOr: + test "should initialize successfully": + (waitFor manager.init()).isOkOr: raiseAssert $error check: manager.ethRpc.isSome() manager.wakuRlnContract.isSome() manager.initialized - manager.rlnRelayMaxMessageLimit == 100 + manager.rlnRelayMaxMessageLimit == 600 - asyncTest "should error on initialization when chainId does not match": + test "should error on initialization when chainId does not match": manager.chainId = utils_onchain.CHAIN_ID + 1 - (await manager.init()).isErrOr: + (waitFor manager.init()).isErrOr: raiseAssert "Expected error when chainId does not match" - asyncTest "should initialize when chainId is set to 0": - manager.chainId = 0 - - (await manager.init()).isOkOr: - raiseAssert $error - - asyncTest "should error on initialization when loaded metadata does not match": - (await manager.init()).isOkOr: + test "should initialize when chainId is set to 0": + manager.chainId = 0x0'u256 + (waitFor manager.init()).isOkOr: raiseAssert $error + test "should error on initialization when loaded metadata does not match": + (waitFor manager.init()).isOkOr: + assert false, $error let metadataSetRes = manager.setMetadata() assert metadataSetRes.isOk(), metadataSetRes.error let metadataOpt = manager.rlnInstance.getMetadata().valueOr: - raiseAssert $error + assert false, $error + return assert metadataOpt.isSome(), "metadata is not set" let metadata = metadataOpt.get() - - assert metadata.chainId == 1337, "chainId is not equal to 1337" + assert metadata.chainId == 1234, "chainId is not equal to 1234" assert metadata.contractAddress == manager.ethContractAddress, "contractAddress is not equal to " & manager.ethContractAddress - - let differentContractAddress = await uploadRLNContract(manager.ethClientUrls[0]) + let web3 = manager.ethRpc.get() + let accounts = waitFor web3.provider.eth_accounts() + web3.defaultAccount = accounts[2] + let (privateKey, acc) = createEthAccount(web3) + let tokenAddress = (waitFor deployTestToken(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy test token contract: " & $error + return + let differentContractAddress = ( + waitFor executeForgeContractDeployScripts(privateKey, acc, web3) + ).valueOr: + assert false, "Failed to deploy RLN contract: " & $error + return # simulating a change in the contractAddress let manager2 = OnchainGroupManager( ethClientUrls: @[EthClient], ethContractAddress: $differentContractAddress, rlnInstance: manager.rlnInstance, onFatalErrorAction: proc(errStr: string) = - raiseAssert errStr + assert false, errStr , ) - let e = await manager2.init() + let e = waitFor manager2.init() (e).isErrOr: - raiseAssert "Expected error when contract address doesn't match" + assert false, "Expected error when contract address doesn't match" - echo "---" - discard "persisted data: contract address mismatch" - echo e.error - echo "---" - - asyncTest "should error if contract does not exist": + test "should error if contract does not exist": manager.ethContractAddress = 
"0x0000000000000000000000000000000000000000" - var triggeredError = false - try: - discard await manager.init() - except CatchableError: - triggeredError = true + (waitFor manager.init()).isErrOr: + raiseAssert "Expected error when contract address doesn't exist" - check triggeredError - - asyncTest "should error when keystore path and password are provided but file doesn't exist": + test "should error when keystore path and password are provided but file doesn't exist": manager.keystorePath = some("/inexistent/file") manager.keystorePassword = some("password") - (await manager.init()).isErrOr: + (waitFor manager.init()).isErrOr: raiseAssert "Expected error when keystore file doesn't exist" - asyncTest "trackRootChanges: start tracking roots": - (await manager.init()).isOkOr: + test "trackRootChanges: start tracking roots": + (waitFor manager.init()).isOkOr: raiseAssert $error discard manager.trackRootChanges() - asyncTest "trackRootChanges: should guard against uninitialized state": + test "trackRootChanges: should guard against uninitialized state": try: discard manager.trackRootChanges() except CatchableError: check getCurrentExceptionMsg().len == 38 - asyncTest "trackRootChanges: should sync to the state of the group": + test "trackRootChanges: should sync to the state of the group": let credentials = generateCredentials(manager.rlnInstance) - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error let merkleRootBefore = manager.fetchMerkleRoot() try: - await manager.register(credentials, UserMessageLimit(1)) + waitFor manager.register(credentials, UserMessageLimit(20)) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - discard await withTimeout(trackRootChanges(manager), 15.seconds) + discard waitFor withTimeout(trackRootChanges(manager), 15.seconds) let merkleRootAfter = manager.fetchMerkleRoot() @@ -154,7 +159,7 @@ suite "Onchain group manager": metadata.validRoots == manager.validRoots.toSeq() merkleRootBefore != merkleRootAfter - asyncTest "trackRootChanges: should fetch history correctly": + test "trackRootChanges: should fetch history correctly": # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation # relies on a busy loop rather than event-based monitoring. As a result, some root changes # may be missed, leading to inconsistent test results (i.e., it may randomly return true or false). @@ -162,15 +167,16 @@ suite "Onchain group manager": # after each registration. 
const credentialCount = 6 let credentials = generateCredentials(manager.rlnInstance, credentialCount) - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error let merkleRootBefore = manager.fetchMerkleRoot() try: for i in 0 ..< credentials.len(): - await manager.register(credentials[i], UserMessageLimit(1)) - discard await manager.updateRoots() + debug "Registering credential", index = i, credential = credentials[i] + waitFor manager.register(credentials[i], UserMessageLimit(20)) + discard waitFor manager.updateRoots() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() @@ -180,13 +186,13 @@ suite "Onchain group manager": merkleRootBefore != merkleRootAfter manager.validRoots.len() == credentialCount - asyncTest "register: should guard against uninitialized state": + test "register: should guard against uninitialized state": let dummyCommitment = default(IDCommitment) try: - await manager.register( + waitFor manager.register( RateCommitment( - idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(1) + idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(20) ) ) except CatchableError: @@ -194,18 +200,18 @@ suite "Onchain group manager": except Exception: assert false, "exception raised: " & getCurrentExceptionMsg() - asyncTest "register: should register successfully": + test "register: should register successfully": # TODO :- similar to ```trackRootChanges: should fetch history correctly``` - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error let idCommitment = generateCredentials(manager.rlnInstance).idCommitment let merkleRootBefore = manager.fetchMerkleRoot() try: - await manager.register( + waitFor manager.register( RateCommitment( - idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1) + idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20) ) ) except Exception, CatchableError: @@ -218,47 +224,47 @@ suite "Onchain group manager": merkleRootAfter != merkleRootBefore manager.latestIndex == 1 - asyncTest "register: callback is called": + test "register: callback is called": let idCredentials = generateCredentials(manager.rlnInstance) let idCommitment = idCredentials.idCommitment let fut = newFuture[void]() proc callback(registrations: seq[Membership]): Future[void] {.async.} = - let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(1)).get() + let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(20)).get() check: registrations.len == 1 registrations[0].rateCommitment == rateCommitment registrations[0].index == 0 fut.complete() - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error manager.onRegister(callback) try: - await manager.register( + waitFor manager.register( RateCommitment( - idCommitment: idCommitment, userMessageLimit: UserMessageLimit(1) + idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20) ) ) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await fut + waitFor fut - asyncTest "withdraw: should guard against uninitialized state": + test "withdraw: should guard against uninitialized state": let idSecretHash = generateCredentials(manager.rlnInstance).idSecretHash try: - await manager.withdraw(idSecretHash) + waitFor manager.withdraw(idSecretHash) except CatchableError: assert true except Exception: assert false, "exception raised: " & getCurrentExceptionMsg() - asyncTest "validateRoot: should 
validate good root": + test "validateRoot: should validate good root": let idCredentials = generateCredentials(manager.rlnInstance) let idCommitment = idCredentials.idCommitment @@ -267,27 +273,27 @@ suite "Onchain group manager": proc callback(registrations: seq[Membership]): Future[void] {.async.} = if registrations.len == 1 and registrations[0].rateCommitment == - getRateCommitment(idCredentials, UserMessageLimit(1)).get() and + getRateCommitment(idCredentials, UserMessageLimit(20)).get() and registrations[0].index == 0: manager.idCredentials = some(idCredentials) fut.complete() manager.onRegister(callback) - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error try: - await manager.register(idCredentials, UserMessageLimit(1)) + waitFor manager.register(idCredentials, UserMessageLimit(20)) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await fut + waitFor fut - let rootUpdated = await manager.updateRoots() + let rootUpdated = waitFor manager.updateRoots() if rootUpdated: - let proofResult = await manager.fetchMerkleProofElements() + let proofResult = waitFor manager.fetchMerkleProofElements() if proofResult.isErr(): error "Failed to fetch Merkle proof", error = proofResult.error manager.merkleProofCache = proofResult.get() @@ -309,14 +315,14 @@ suite "Onchain group manager": check: validated - asyncTest "validateRoot: should reject bad root": + test "validateRoot: should reject bad root": let idCredentials = generateCredentials(manager.rlnInstance) let idCommitment = idCredentials.idCommitment - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error - manager.userMessageLimit = some(UserMessageLimit(1)) + manager.userMessageLimit = some(UserMessageLimit(20)) manager.membershipIndex = some(MembershipIndex(0)) manager.idCredentials = some(idCredentials) @@ -342,9 +348,9 @@ suite "Onchain group manager": check: validated == false - asyncTest "verifyProof: should verify valid proof": + test "verifyProof: should verify valid proof": let credentials = generateCredentials(manager.rlnInstance) - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error let fut = newFuture[void]() @@ -352,7 +358,7 @@ suite "Onchain group manager": proc callback(registrations: seq[Membership]): Future[void] {.async.} = if registrations.len == 1 and registrations[0].rateCommitment == - getRateCommitment(credentials, UserMessageLimit(1)).get() and + getRateCommitment(credentials, UserMessageLimit(20)).get() and registrations[0].index == 0: manager.idCredentials = some(credentials) fut.complete() @@ -360,15 +366,15 @@ suite "Onchain group manager": manager.onRegister(callback) try: - await manager.register(credentials, UserMessageLimit(1)) + waitFor manager.register(credentials, UserMessageLimit(20)) except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await fut + waitFor fut - let rootUpdated = await manager.updateRoots() + let rootUpdated = waitFor manager.updateRoots() if rootUpdated: - let proofResult = await manager.fetchMerkleProofElements() + let proofResult = waitFor manager.fetchMerkleProofElements() if proofResult.isErr(): error "Failed to fetch Merkle proof", error = proofResult.error manager.merkleProofCache = proofResult.get() @@ -391,21 +397,21 @@ suite "Onchain group manager": check: verified - asyncTest "verifyProof: should reject invalid proof": - (await manager.init()).isOkOr: + test "verifyProof: should reject 
invalid proof": + (waitFor manager.init()).isOkOr: raiseAssert $error let idCredential = generateCredentials(manager.rlnInstance) try: - await manager.register(idCredential, UserMessageLimit(1)) + waitFor manager.register(idCredential, UserMessageLimit(20)) except Exception, CatchableError: assert false, "exception raised when calling startGroupSync: " & getCurrentExceptionMsg() let messageBytes = "Hello".toBytes() - let rootUpdated = await manager.updateRoots() + let rootUpdated = waitFor manager.updateRoots() manager.merkleProofCache = newSeq[byte](640) for i in 0 ..< 640: @@ -430,10 +436,10 @@ suite "Onchain group manager": check: verified == false - asyncTest "root queue should be updated correctly": + test "root queue should be updated correctly": const credentialCount = 12 let credentials = generateCredentials(manager.rlnInstance, credentialCount) - (await manager.init()).isOkOr: + (waitFor manager.init()).isOkOr: raiseAssert $error type TestBackfillFuts = array[0 .. credentialCount - 1, Future[void]] @@ -448,7 +454,7 @@ suite "Onchain group manager": proc callback(registrations: seq[Membership]): Future[void] {.async.} = if registrations.len == 1 and registrations[0].rateCommitment == - getRateCommitment(credentials[futureIndex], UserMessageLimit(1)).get() and + getRateCommitment(credentials[futureIndex], UserMessageLimit(20)).get() and registrations[0].index == MembershipIndex(futureIndex): futs[futureIndex].complete() futureIndex += 1 @@ -459,47 +465,40 @@ suite "Onchain group manager": manager.onRegister(generateCallback(futures, credentials)) for i in 0 ..< credentials.len(): - await manager.register(credentials[i], UserMessageLimit(1)) - discard await manager.updateRoots() + waitFor manager.register(credentials[i], UserMessageLimit(20)) + discard waitFor manager.updateRoots() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() - await allFutures(futures) + waitFor allFutures(futures) check: manager.validRoots.len() == credentialCount - asyncTest "isReady should return false if ethRpc is none": - (await manager.init()).isOkOr: + test "isReady should return false if ethRpc is none": + (waitFor manager.init()).isOkOr: raiseAssert $error manager.ethRpc = none(Web3) var isReady = true try: - isReady = await manager.isReady() + isReady = waitFor manager.isReady() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() check: isReady == false - asyncTest "isReady should return true if ethRpc is ready": - (await manager.init()).isOkOr: + test "isReady should return true if ethRpc is ready": + (waitFor manager.init()).isOkOr: raiseAssert $error var isReady = false try: - isReady = await manager.isReady() + isReady = waitFor manager.isReady() except Exception, CatchableError: assert false, "exception raised: " & getCurrentExceptionMsg() check: isReady == true - - ################################ - ## Terminating/removing Anvil - ################################ - - # We stop Anvil daemon - stopAnvil(runAnvil) diff --git a/tests/waku_rln_relay/test_rln_group_manager_static.nim b/tests/waku_rln_relay/test_rln_group_manager_static.nim index 5d1916f63..73dff8a8b 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_static.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_static.nim @@ -13,7 +13,7 @@ import waku_rln_relay/group_manager/static/group_manager, ] -import stew/shims/net, chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder +import chronos, libp2p/crypto/crypto, eth/keys, dnsdisc/builder 
import std/tempfiles diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index 907b7c1b3..d09764ca2 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -1,9 +1,8 @@ {.used.} import - std/[options, os, sequtils, times, tempfiles], + std/[options, os, sequtils, tempfiles], stew/byteutils, - stew/shims/net as stewNet, testutils/unittests, chronos, chronicles, @@ -17,7 +16,10 @@ import waku_rln_relay/protocol_metrics, waku_keystore, ], - ./rln/waku_rln_relay_utils + ./rln/waku_rln_relay_utils, + ../testlib/[wakucore, futures, wakunode, testutils] + +from std/times import epochTime suite "Waku rln relay": test "key_gen Nim Wrappers": @@ -244,7 +246,7 @@ suite "Waku rln relay": .setMetadata( RlnMetadata( lastProcessedBlock: 128, - chainId: 1155511, + chainId: 1155511'u256, contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155", ) ) @@ -262,7 +264,7 @@ suite "Waku rln relay": .setMetadata( RlnMetadata( lastProcessedBlock: 128, - chainId: 1155511, + chainId: 1155511'u256, contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155", ) ) @@ -275,7 +277,7 @@ suite "Waku rln relay": let metadata = metadataOpt.get() check: metadata.lastProcessedBlock == 128 - metadata.chainId == 1155511 + metadata.chainId == 1155511'u256 metadata.contractAddress == "0x9c09146844c1326c2dbc41c451766c7138f88155" test "getMetadata: empty rln metadata": @@ -686,7 +688,7 @@ suite "Waku rln relay": # it is a duplicate assert isDuplicate3, "duplicate should be found" - asyncTest "validateMessageAndUpdateLog test": + asyncTest "validateMessageAndUpdateLog: against epoch gap": let index = MembershipIndex(5) let wakuRlnConfig = WakuRlnConfig( @@ -700,27 +702,31 @@ suite "Waku rln relay": let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: raiseAssert $error - # get the current epoch time - let time = epochTime() + let time_1 = epochTime() - # create some messages from the same peer and append rln proof to them, except wm4 var - wm1 = WakuMessage(payload: "Valid message".toBytes()) + # create some messages from the same peer and append rln proof to them, except wm4 + wm1 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now()) # another message in the same epoch as wm1, it will break the messaging rate limit - wm2 = WakuMessage(payload: "Spam".toBytes()) - # wm3 points to the next epoch - wm3 = WakuMessage(payload: "Valid message".toBytes()) - wm4 = WakuMessage(payload: "Invalid message".toBytes()) + wm2 = WakuMessage(payload: "Spam message".toBytes(), timestamp: now()) - wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr: + await sleepAsync(1.seconds) + let time_2 = epochTime() + + var + # wm3 points to the next epoch bcz of the sleep + wm3 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now()) + wm4 = WakuMessage(payload: "Invalid message".toBytes(), timestamp: now()) + + wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr: raiseAssert $error - wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: + wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr: raiseAssert $error - wakuRlnRelay.unsafeAppendRLNProof(wm3, time + float64(wakuRlnRelay.rlnEpochSizeSec)).isOkOr: + + wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr: raiseAssert $error # validate messages - # validateMessage proc checks the validity of the message fields and adds it to the log (if valid) let msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1) # wm2 is published within the same Epoch as 
wm1 and should be found as spam @@ -736,6 +742,48 @@ suite "Waku rln relay": msgValidate3 == MessageValidationResult.Valid msgValidate4 == MessageValidationResult.Invalid + asyncTest "validateMessageAndUpdateLog: against timestamp gap": + let index = MembershipIndex(5) + + let wakuRlnConfig = WakuRlnConfig( + dynamic: false, + credIndex: some(index), + userMessageLimit: 10, + epochSizeSec: 10, + treePath: genTempPath("rln_tree", "waku_rln_relay_2"), + ) + + let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error + + # usually it's 20 seconds but we set it to 2 for testing purposes which make the test faster + wakuRlnRelay.rlnMaxTimestampGap = 1 + + var time = epochTime() + + var + wm1 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now()) + wm2 = WakuMessage(payload: "timestamp message".toBytes(), timestamp: now()) + + wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr: + raiseAssert $error + + wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: + raiseAssert $error + + # validate the first message because it's timestamp is the same as the generated timestamp + let msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1) + + # wait for 2 seconds to make the timestamp different from generated timestamp + await sleepAsync(2.seconds) + + # invalidate the second message because it's timestamp is different from the generated timestamp + let msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2) + + check: + msgValidate1 == MessageValidationResult.Valid + msgValidate2 == MessageValidationResult.Invalid + asyncTest "validateMessageAndUpdateLog: multiple senders with same external nullifier": let index1 = MembershipIndex(5) let index2 = MembershipIndex(6) @@ -766,9 +814,11 @@ suite "Waku rln relay": # create messages from different peers and append rln proofs to them var - wm1 = WakuMessage(payload: "Valid message from sender 1".toBytes()) + wm1 = + WakuMessage(payload: "Valid message from sender 1".toBytes(), timestamp: now()) # another message in the same epoch as wm1, it will break the messaging rate limit - wm2 = WakuMessage(payload: "Valid message from sender 2".toBytes()) + wm2 = + WakuMessage(payload: "Valid message from sender 2".toBytes(), timestamp: now()) wakuRlnRelay1.appendRLNProof(wm1, time).isOkOr: raiseAssert $error diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index 3ff6923e0..8b5a47174 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -3,7 +3,6 @@ import std/[sequtils, tempfiles], stew/byteutils, - stew/shims/net as stewNet, testutils/unittests, chronicles, chronos, @@ -58,7 +57,7 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode @@ -75,7 +74,7 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( @@ -91,7 +90,7 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node3.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( @@ -118,13 +117,18 @@ procSuite "WakuNode - RLN 
relay": if topic == DefaultPubsubTopic: completionFut.complete(true) - ## The following unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. - node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: - assert false, "Failed to unsubscribe from topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error ## Subscribe to the relay topic to add the custom relay handler defined above - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) @@ -132,7 +136,8 @@ procSuite "WakuNode - RLN relay": let payload = "Hello".toBytes() # prepare the epoch - var message = WakuMessage(payload: @payload, contentTopic: contentTopic) + var message = + WakuMessage(payload: @payload, contentTopic: contentTopic, timestamp: now()) doAssert(node1.wakuRlnRelay.unsafeAppendRLNProof(message, epochTime()).isOk()) debug "Nodes participating in the test", @@ -146,8 +151,7 @@ procSuite "WakuNode - RLN relay": discard await node1.publish(some(DefaultPubsubTopic), message) await sleepAsync(2000.millis) - check: - (await completionFut.withTimeout(10.seconds)) == true + assert (await completionFut.withTimeout(10.seconds)), "completionFut timed out" await node1.stop() await node2.stop() @@ -169,7 +173,7 @@ procSuite "WakuNode - RLN relay": ] # set up three nodes - await allFutures(nodes.mapIt(it.mountRelay(shards))) + await allFutures(nodes.mapIt(it.mountRelay())) # mount rlnrelay in off-chain mode for index, node in nodes: @@ -201,17 +205,20 @@ procSuite "WakuNode - RLN relay": elif topic == $shards[1]: rxMessagesTopic2 = rxMessagesTopic2 + 1 - ## This unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. 
- nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[0])).isOkOr: - assert false, "Failed to unsubscribe to pubsub topic: " & $error - nodes[2].unsubscribe((kind: PubsubUnsub, topic: $shards[1])).isOkOr: - assert false, "Failed to unsubscribe to pubsub topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + nodes[0].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in nodes[0]: " & $error + nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in nodes[1]: " & $error # mount the relay handlers - nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), some(relayHandler)).isOkOr: + nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error - nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), some(relayHandler)).isOkOr: + nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(1000.millis) @@ -221,19 +228,25 @@ procSuite "WakuNode - RLN relay": var messages1: seq[WakuMessage] = @[] var messages2: seq[WakuMessage] = @[] - let epochTime = epochTime() + var epochTime = epochTime() for i in 0 ..< 3: var message = WakuMessage( - payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[0] + payload: ("Payload_" & $i).toBytes(), + timestamp: now(), + contentTopic: contentTopics[0], ) nodes[0].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr: raiseAssert $error messages1.add(message) + epochTime = epochTime() + for i in 0 ..< 3: var message = WakuMessage( - payload: ("Payload_" & $i).toBytes(), contentTopic: contentTopics[1] + payload: ("Payload_" & $i).toBytes(), + timestamp: now(), + contentTopic: contentTopics[1], ) nodes[1].wakuRlnRelay.unsafeAppendRLNProof(message, epochTime).isOkOr: raiseAssert $error @@ -273,7 +286,7 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode @@ -290,7 +303,7 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode let wakuRlnConfig2 = WakuRlnConfig( @@ -306,7 +319,7 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node3.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig3 = WakuRlnConfig( @@ -333,13 +346,18 @@ procSuite "WakuNode - RLN relay": if topic == DefaultPubsubTopic: completionFut.complete(true) - ## The following unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. 
- node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: - assert false, "Failed to unsubscribe to pubsub topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error # mount the relay handler - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) @@ -364,8 +382,12 @@ procSuite "WakuNode - RLN relay": # check the proof is generated correctly outside when block to avoid duplication let rateLimitProof = rateLimitProofRes.get().encode().buffer - let message = - WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof) + let message = WakuMessage( + payload: @payload, + contentTopic: contentTopic, + proof: rateLimitProof, + timestamp: now(), + ) ## node1 publishes a message with an invalid rln proof, the message is then relayed to node2 which in turn ## attempts to verify the rate limit proof and fails hence does not relay the message to node3, thus the relayHandler of node3 @@ -398,7 +420,7 @@ procSuite "WakuNode - RLN relay": # set up three nodes # node1 - (await node1.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode @@ -415,7 +437,7 @@ procSuite "WakuNode - RLN relay": await node1.start() # node 2 - (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode @@ -431,7 +453,7 @@ procSuite "WakuNode - RLN relay": await node2.start() # node 3 - (await node3.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node3.mountRelay()).isOkOr: assert false, "Failed to mount relay" # mount rlnrelay in off-chain mode @@ -452,24 +474,36 @@ procSuite "WakuNode - RLN relay": await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) # get the current epoch time - let time = epochTime() + let time_1 = epochTime() + # create some messages with rate limit proofs var - wm1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic) + wm1 = WakuMessage( + payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic + ) # another message in the same epoch as wm1, it will break the messaging rate limit - wm2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic) + wm2 = WakuMessage( + payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic + ) # wm3 points to the next epoch - wm3 = WakuMessage(payload: "message 3".toBytes(), contentTopic: contentTopic) - wm4 = WakuMessage(payload: "message 4".toBytes(), contentTopic: contentTopic) - node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time).isOkOr: + await sleepAsync(1000.millis) + let time_2 = epochTime() + + var + wm3 = WakuMessage( + payload: "message 3".toBytes(), timestamp: now(), contentTopic: contentTopic + ) + wm4 = WakuMessage( + payload: "message 4".toBytes(), timestamp: now(), contentTopic: contentTopic + ) + + 
node3.wakuRlnRelay.unsafeAppendRLNProof(wm1, time_1).isOkOr: raiseAssert $error - node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time).isOkOr: + node3.wakuRlnRelay.unsafeAppendRLNProof(wm2, time_1).isOkOr: raiseAssert $error - node3.wakuRlnRelay.unsafeAppendRLNProof( - wm3, time + float64(node3.wakuRlnRelay.rlnEpochSizeSec) - ).isOkOr: + node3.wakuRlnRelay.unsafeAppendRLNProof(wm3, time_2).isOkOr: raiseAssert $error # relay handler for node3 @@ -491,13 +525,18 @@ procSuite "WakuNode - RLN relay": if msg.payload == wm4.payload: completionFut4.complete(true) - ## The following unsubscription is necessary to remove the default relay handler, which is - ## added when mountRelay is called. - node3.unsubscribe((kind: PubsubUnsub, topic: DefaultPubsubTopic)).isOkOr: - assert false, "Failed to unsubscribe to pubsub topic: " & $error + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error # mount the relay handler for node3 - node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(2000.millis) @@ -540,14 +579,14 @@ procSuite "WakuNode - RLN relay": epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4 # Given both nodes mount relay and rlnrelay - (await node1.mountRelay(shardSeq)).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10") (await node1.mountRlnRelay(wakuRlnConfig1)).isOkOr: assert false, "Failed to mount rlnrelay" # Mount rlnrelay in node2 in off-chain mode - (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11") await node2.mountRlnRelay(wakuRlnConfig2) @@ -591,7 +630,7 @@ procSuite "WakuNode - RLN relay": if msg == wm6: completionFut6.complete(true) - node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr: + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error # Given all messages have an rln proof and are published by the node 1 @@ -682,17 +721,27 @@ procSuite "WakuNode - RLN relay": # Given both nodes mount relay and rlnrelay # Mount rlnrelay in node1 in off-chain mode - (await node1.mountRelay(shardSeq)).isOkOr: + (await node1.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig1 = buildWakuRlnConfig(1, epochSizeSec, "wakunode_10") await node1.mountRlnRelay(wakuRlnConfig1) # Mount rlnrelay in node2 in off-chain mode - (await node2.mountRelay(@[DefaultRelayShard])).isOkOr: + (await node2.mountRelay()).isOkOr: assert false, "Failed to mount relay" let wakuRlnConfig2 = buildWakuRlnConfig(2, epochSizeSec, "wakunode_11") await node2.mountRlnRelay(wakuRlnConfig2) + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + 
node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + # Given the two nodes are started and connected waitFor allFutures(node1.start(), node2.start()) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -700,8 +749,12 @@ procSuite "WakuNode - RLN relay": # Given some messages with rln proofs let time = epochTime() var - msg1 = WakuMessage(payload: "message 1".toBytes(), contentTopic: contentTopic) - msg2 = WakuMessage(payload: "message 2".toBytes(), contentTopic: contentTopic) + msg1 = WakuMessage( + payload: "message 1".toBytes(), timestamp: now(), contentTopic: contentTopic + ) + msg2 = WakuMessage( + payload: "message 2".toBytes(), timestamp: now(), contentTopic: contentTopic + ) node1.wakuRlnRelay.unsafeAppendRLNProof(msg1, time).isOkOr: raiseAssert $error diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim index 433f865c4..9066b0292 100644 --- a/tests/waku_rln_relay/utils_onchain.nim +++ b/tests/waku_rln_relay/utils_onchain.nim @@ -13,6 +13,7 @@ import web3, web3/conversions, web3/eth_api_types, + json_rpc/rpcclient, json, libp2p/crypto/crypto, eth/keys, @@ -29,7 +30,7 @@ import ../testlib/common, ./utils -const CHAIN_ID* = 1337 +const CHAIN_ID* = 1234'u256 template skip0xPrefix(hexStr: string): int = ## Returns the index of the first meaningful char in `hexStr` by skipping @@ -60,63 +61,347 @@ proc generateCredentials*(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] credentials.add(generateCredentials(rlnInstance)) return credentials -# a util function used for testing purposes -# it deploys membership contract on Anvil (or any Eth client available on EthClient address) -# must be edited if used for a different contract than membership contract -# -proc uploadRLNContract*(ethClientAddress: string): Future[Address] {.async.} = - let web3 = await newWeb3(ethClientAddress) - debug "web3 connected to", ethClientAddress +proc getContractAddressFromDeployScriptOutput(output: string): Result[string, string] = + const searchStr = "Return ==\n0: address " + const addressLength = 42 # Length of an Ethereum address in hex format + let idx = output.find(searchStr) + if idx >= 0: + let startPos = idx + searchStr.len + let endPos = output.find('\n', startPos) + if (endPos - startPos) >= addressLength: + let address = output[startPos ..< endPos] + return ok(address) + return err("Unable to find contract address in deploy script output") - # fetch the list of registered accounts - let accounts = await web3.provider.eth_accounts() - web3.defaultAccount = accounts[1] - let add = web3.defaultAccount - debug "contract deployer account address ", add +proc getForgePath(): string = + var forgePath = "" + if existsEnv("XDG_CONFIG_HOME"): + forgePath = joinPath(forgePath, os.getEnv("XDG_CONFIG_HOME", "")) + else: + forgePath = joinPath(forgePath, os.getEnv("HOME", "")) + forgePath = joinPath(forgePath, ".foundry/bin/forge") + return $forgePath - let balance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest") - debug "Initial account balance: ", balance +contract(ERC20Token): + proc allowance(owner: Address, spender: Address): UInt256 {.view.} + proc balanceOf(account: Address): UInt256 {.view.} - # deploy poseidon hasher bytecode - let poseidonT3Receipt = 
await web3.deployContract(PoseidonT3) - let poseidonT3Address = poseidonT3Receipt.contractAddress.get() - let poseidonAddressStripped = strip0xPrefix($poseidonT3Address) +proc getTokenBalance( + web3: Web3, tokenAddress: Address, account: Address +): Future[UInt256] {.async.} = + let token = web3.contractSender(ERC20Token, tokenAddress) + return await token.balanceOf(account).call() - # deploy lazy imt bytecode - let lazyImtReceipt = await web3.deployContract( - LazyIMT.replace("__$PoseidonT3$__", poseidonAddressStripped) - ) - let lazyImtAddress = lazyImtReceipt.contractAddress.get() - let lazyImtAddressStripped = strip0xPrefix($lazyImtAddress) +proc ethToWei(eth: UInt256): UInt256 = + eth * 1000000000000000000.u256 - # deploy waku rlnv2 contract - let wakuRlnContractReceipt = await web3.deployContract( - WakuRlnV2Contract.replace("__$PoseidonT3$__", poseidonAddressStripped).replace( - "__$LazyIMT$__", lazyImtAddressStripped +proc sendMintCall( + web3: Web3, + accountFrom: Address, + tokenAddress: Address, + recipientAddress: Address, + amountTokens: UInt256, + recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256), +): Future[TxHash] {.async.} = + let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome() + + if doBalanceAssert: + let balanceBeforeMint = await getTokenBalance(web3, tokenAddress, recipientAddress) + let balanceBeforeExpectedTokens = recipientBalanceBeforeExpectedTokens.get() + assert balanceBeforeMint == balanceBeforeExpectedTokens, + fmt"Balance is {balanceBeforeMint} before minting but expected {balanceBeforeExpectedTokens}" + + # Create mint transaction + # Method ID for mint(address,uint256) is 0x40c10f19 which is part of the openzeppelin ERC20 standard + # The method ID for a deployed test token can be viewed here https://sepolia.lineascan.build/address/0x185A0015aC462a0aECb81beCc0497b649a64B9ea#writeContract + let mintSelector = "0x40c10f19" + let addressHex = recipientAddress.toHex() + # Pad the address and amount to 32 bytes each + let paddedAddress = addressHex.align(64, '0') + + let amountHex = amountTokens.toHex() + let amountWithout0x = + if amountHex.toLower().startsWith("0x"): + amountHex[2 .. 
^1] + else: + amountHex + let paddedAmount = amountWithout0x.align(64, '0') + let mintCallData = mintSelector & paddedAddress & paddedAmount + let gasPrice = int(await web3.provider.eth_gasPrice()) + + # Create the transaction + var tx: TransactionArgs + tx.`from` = Opt.some(accountFrom) + tx.to = Opt.some(tokenAddress) + tx.value = Opt.some(0.u256) # No ETH is sent for token operations + tx.gasPrice = Opt.some(Quantity(gasPrice)) + tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData)) + + trace "Sending mint call" + let txHash = await web3.send(tx) + + let balanceOfSelector = "0x70a08231" + let balanceCallData = balanceOfSelector & paddedAddress + + # Wait a bit for transaction to be mined + await sleepAsync(500.milliseconds) + + if doBalanceAssert: + let balanceAfterMint = await getTokenBalance(web3, tokenAddress, recipientAddress) + let balanceAfterExpectedTokens = + recipientBalanceBeforeExpectedTokens.get() + amountTokens + assert balanceAfterMint == balanceAfterExpectedTokens, + fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}" + + return txHash + +# Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership) +proc checkTokenAllowance( + web3: Web3, tokenAddress: Address, owner: Address, spender: Address +): Future[UInt256] {.async.} = + let token = web3.contractSender(ERC20Token, tokenAddress) + let allowance = await token.allowance(owner, spender).call() + trace "Current allowance", owner = owner, spender = spender, allowance = allowance + return allowance + +proc setupContractDeployment( + forgePath: string, submodulePath: string +): Result[void, string] = + trace "Contract deployer paths", forgePath = forgePath, submodulePath = submodulePath + # Build the Foundry project + try: + let (forgeCleanOutput, forgeCleanExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} clean""") + trace "Executed forge clean command", output = forgeCleanOutput + if forgeCleanExitCode != 0: + return err("forge clean command failed") + + let (forgeInstallOutput, forgeInstallExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} install""") + trace "Executed forge install command", output = forgeInstallOutput + if forgeInstallExitCode != 0: + return err("forge install command failed") + + let (pnpmInstallOutput, pnpmInstallExitCode) = + execCmdEx(fmt"""cd {submodulePath} && pnpm install""") + trace "Executed pnpm install command", output = pnpmInstallOutput + if pnpmInstallExitCode != 0: + return err("pnpm install command failed" & pnpmInstallOutput) + + let (forgeBuildOutput, forgeBuildExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} build""") + trace "Executed forge build command", output = forgeBuildOutput + if forgeBuildExitCode != 0: + return err("forge build command failed") + + # Set the environment variable API keys to anything for local testnet deployment + putEnv("API_KEY_CARDONA", "123") + putEnv("API_KEY_LINEASCAN", "123") + putEnv("API_KEY_ETHERSCAN", "123") + except OSError, IOError: + return err("Command execution failed: " & getCurrentExceptionMsg()) + return ok() + +proc deployTestToken*( + pk: keys.PrivateKey, acc: Address, web3: Web3 +): Future[Result[Address, string]] {.async.} = + ## Executes a Foundry forge script that deploys the a token contract (ERC-20) used for testing. 
This is a prerequisite to enable the contract deployment and this token contract address needs to be minted and approved for the accounts that need to register memberships with the contract + ## submodulePath: path to the submodule containing contract deploy scripts + + # All RLN related tests should be run from the root directory of the project + let submodulePath = absolutePath("./vendor/waku-rlnv2-contract") + + # Verify submodule path exists + if not dirExists(submodulePath): + error "Submodule path does not exist", submodulePath = submodulePath + return err("Submodule path does not exist: " & submodulePath) + + let forgePath = getForgePath() + + setupContractDeployment(forgePath, submodulePath).isOkOr: + error "Failed to setup contract deployment", error = $error + return err("Failed to setup contract deployment: " & $error) + + # Deploy TestToken contract + let forgeCmdTestToken = + fmt"""cd {submodulePath} && {forgePath} script test/TestToken.sol --broadcast -vvv --rpc-url http://localhost:8540 --tc TestTokenFactory --private-key {pk} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployTestToken, exitCodeDeployTestToken) = execCmdEx(forgeCmdTestToken) + trace "Executed forge command to deploy TestToken contract", + output = outputDeployTestToken + if exitCodeDeployTestToken != 0: + return error("Forge command to deploy TestToken contract failed") + + # Parse the command output to find contract address + let testTokenAddress = getContractAddressFromDeployScriptOutput(outputDeployTestToken).valueOr: + error "Failed to get TestToken contract address from deploy script output", + error = $error + return err( + "Failed to get TestToken contract address from deploy script output: " & $error ) - ) - let wakuRlnContractAddress = wakuRlnContractReceipt.contractAddress.get() - let wakuRlnAddressStripped = strip0xPrefix($wakuRlnContractAddress) + debug "Address of the TestToken contract", testTokenAddress - debug "Address of the deployed rlnv2 contract: ", wakuRlnContractAddress + let testTokenAddressBytes = hexToByteArray[20](testTokenAddress) + let testTokenAddressAddress = Address(testTokenAddressBytes) + putEnv("TOKEN_ADDRESS", testTokenAddressAddress.toHex()) - # need to send concat: impl & init_bytes - let contractInput = - byteutils.toHex(encode(wakuRlnContractAddress)) & Erc1967ProxyContractInput - debug "contractInput", contractInput - let proxyReceipt = - await web3.deployContract(Erc1967Proxy, contractInput = contractInput) + return ok(testTokenAddressAddress) - debug "proxy receipt", contractAddress = proxyReceipt.contractAddress.get() - let proxyAddress = proxyReceipt.contractAddress.get() +# Sends an ERC20 token approval call to allow a spender to spend a certain amount of tokens on behalf of the owner +proc approveTokenAllowanceAndVerify*( + web3: Web3, + accountFrom: Address, + privateKey: keys.PrivateKey, + tokenAddress: Address, + spender: Address, + amountWei: UInt256, + expectedAllowanceBefore: Option[UInt256] = none(UInt256), +): Future[Result[TxHash, string]] {.async.} = + var allowanceBefore: UInt256 + if expectedAllowanceBefore.isSome(): + allowanceBefore = + await checkTokenAllowance(web3, tokenAddress, accountFrom, spender) + let expected = expectedAllowanceBefore.get() + if allowanceBefore != expected: + return + err(fmt"Allowance is {allowanceBefore} before approval but expected {expected}") - let newBalance = await web3.provider.eth_getBalance(web3.defaultAccount, "latest") - debug "Account balance after the contract 
deployment: ", newBalance + # Temporarily set the private key + let oldPrivateKey = web3.privateKey + web3.privateKey = Opt.some(privateKey) + web3.lastKnownNonce = Opt.none(Quantity) + + try: + # ERC20 approve function signature: approve(address spender, uint256 amount) + # Method ID for approve(address,uint256) is 0x095ea7b3 + const APPROVE_SELECTOR = "0x095ea7b3" + let addressHex = spender.toHex().align(64, '0') + let amountHex = amountWei.toHex().align(64, '0') + let approveCallData = APPROVE_SELECTOR & addressHex & amountHex + + let gasPrice = await web3.provider.eth_gasPrice() + + var tx: TransactionArgs + tx.`from` = Opt.some(accountFrom) + tx.to = Opt.some(tokenAddress) + tx.value = Opt.some(0.u256) + tx.gasPrice = Opt.some(gasPrice) + tx.gas = Opt.some(Quantity(100000)) + tx.data = Opt.some(byteutils.hexToSeqByte(approveCallData)) + tx.chainId = Opt.some(CHAIN_ID) + + trace "Sending approve call", tx = tx + let txHash = await web3.send(tx) + let receipt = await web3.getMinedTransactionReceipt(txHash) + + if receipt.status.isNone(): + return err("Approval transaction failed receipt is none") + if receipt.status.get() != 1.Quantity: + return err("Approval transaction failed status quantity not 1") + + # Single verification check after mining (no extra sleep needed) + let allowanceAfter = + await checkTokenAllowance(web3, tokenAddress, accountFrom, spender) + let expectedAfter = + if expectedAllowanceBefore.isSome(): + expectedAllowanceBefore.get() + amountWei + else: + amountWei + + if allowanceAfter < expectedAfter: + return err( + fmt"Allowance is {allowanceAfter} after approval but expected at least {expectedAfter}" + ) + + return ok(txHash) + except CatchableError as e: + return err(fmt"Failed to send approve transaction: {e.msg}") + finally: + # Restore the old private key + web3.privateKey = oldPrivateKey + +proc executeForgeContractDeployScripts*( + privateKey: keys.PrivateKey, acc: Address, web3: Web3 +): Future[Result[Address, string]] {.async, gcsafe.} = + ## Executes a set of foundry forge scripts required to deploy the RLN contract and returns the deployed proxy contract address + ## submodulePath: path to the submodule containing contract deploy scripts + + # All RLN related tests should be run from the root directory of the project + let submodulePath = "./vendor/waku-rlnv2-contract" + + # Verify submodule path exists + if not dirExists(submodulePath): + error "Submodule path does not exist", submodulePath = submodulePath + return err("Submodule path does not exist: " & submodulePath) + + let forgePath = getForgePath() + debug "Forge path", forgePath + + # Verify forge executable exists + if not fileExists(forgePath): + error "Forge executable not found", forgePath = forgePath + return err("Forge executable not found: " & forgePath) + + trace "contract deployer account details", account = acc, privateKey = privateKey + let setupContractEnv = setupContractDeployment(forgePath, submodulePath) + if setupContractEnv.isErr(): + error "Failed to setup contract deployment" + return err("Failed to setup contract deployment") + + # Deploy LinearPriceCalculator contract + let forgeCmdPriceCalculator = + fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployPriceCalculator --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployPriceCalculator, exitCodeDeployPriceCalculator) = + execCmdEx(forgeCmdPriceCalculator) + trace "Executed forge command to 
deploy LinearPriceCalculator contract", + output = outputDeployPriceCalculator + if exitCodeDeployPriceCalculator != 0: + return error("Forge command to deploy LinearPriceCalculator contract failed") + + # Parse the output to find contract address + let priceCalculatorAddressRes = + getContractAddressFromDeployScriptOutput(outputDeployPriceCalculator) + if priceCalculatorAddressRes.isErr(): + error "Failed to get LinearPriceCalculator contract address from deploy script output" + let priceCalculatorAddress = priceCalculatorAddressRes.get() + debug "Address of the LinearPriceCalculator contract", priceCalculatorAddress + putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress) + + let forgeCmdWakuRln = + fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployWakuRlnV2 --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployWakuRln, exitCodeDeployWakuRln) = execCmdEx(forgeCmdWakuRln) + trace "Executed forge command to deploy WakuRlnV2 contract", + output = outputDeployWakuRln + if exitCodeDeployWakuRln != 0: + error "Forge command to deploy WakuRlnV2 contract failed", + output = outputDeployWakuRln + + # Parse the output to find contract address + let wakuRlnV2AddressRes = + getContractAddressFromDeployScriptOutput(outputDeployWakuRln) + if wakuRlnV2AddressRes.isErr(): + error "Failed to get WakuRlnV2 contract address from deploy script output" + ##TODO: raise exception here? + let wakuRlnV2Address = wakuRlnV2AddressRes.get() + debug "Address of the WakuRlnV2 contract", wakuRlnV2Address + putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address) + + # Deploy Proxy contract + let forgeCmdProxy = + fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployProxy --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployProxy, exitCodeDeployProxy) = execCmdEx(forgeCmdProxy) + trace "Executed forge command to deploy proxy contract", output = outputDeployProxy + if exitCodeDeployProxy != 0: + error "Forge command to deploy Proxy failed", error = outputDeployProxy + return err("Forge command to deploy Proxy failed") + + let proxyAddress = getContractAddressFromDeployScriptOutput(outputDeployProxy) + let proxyAddressBytes = hexToByteArray[20](proxyAddress.get()) + let proxyAddressAddress = Address(proxyAddressBytes) + + info "Address of the Proxy contract", proxyAddressAddress await web3.close() - debug "disconnected from ", ethClientAddress - - return proxyAddress + return ok(proxyAddressAddress) proc sendEthTransfer*( web3: Web3, @@ -131,7 +416,7 @@ proc sendEthTransfer*( let balanceBeforeWei = await web3.provider.eth_getBalance(accountTo, "latest") let balanceBeforeExpectedWei = accountToBalanceBeforeExpectedWei.get() assert balanceBeforeWei == balanceBeforeExpectedWei, - fmt"Balance is {balanceBeforeWei} but expected {balanceBeforeExpectedWei}" + fmt"Balance is {balanceBeforeWei} before transfer but expected {balanceBeforeExpectedWei}" let gasPrice = int(await web3.provider.eth_gasPrice()) @@ -144,17 +429,17 @@ proc sendEthTransfer*( # TODO: handle the error if sending fails let txHash = await web3.send(tx) + # Wait a bit for transaction to be mined + await sleepAsync(200.milliseconds) + if doBalanceAssert: let balanceAfterWei = await web3.provider.eth_getBalance(accountTo, "latest") let balanceAfterExpectedWei = 
accountToBalanceBeforeExpectedWei.get() + amountWei assert balanceAfterWei == balanceAfterExpectedWei, - fmt"Balance is {balanceAfterWei} but expected {balanceAfterExpectedWei}" + fmt"Balance is {balanceAfterWei} after transfer but expected {balanceAfterExpectedWei}" return txHash -proc ethToWei(eth: UInt256): UInt256 = - eth * 1000000000000000000.u256 - proc createEthAccount*( ethAmount: UInt256 = 1000.u256 ): Future[(keys.PrivateKey, Address)] {.async.} = @@ -196,7 +481,7 @@ proc getAnvilPath*(): string = return $anvilPath # Runs Anvil daemon -proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process = +proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process = # Passed options are # --port Port to listen on. # --gas-limit Sets the block gas limit in WEI. @@ -210,13 +495,13 @@ proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process = anvilPath, args = [ "--port", - "8540", + $port, "--gas-limit", "300000000000000", "--balance", "1000000000", "--chain-id", - $CHAIN_ID, + $chainId, ], options = {poUsePath}, ) @@ -240,14 +525,26 @@ proc runAnvil*(port: int = 8540, chainId: string = "1337"): Process = # Stops Anvil daemon proc stopAnvil*(runAnvil: Process) {.used.} = + if runAnvil.isNil: + debug "stopAnvil called with nil Process" + return + let anvilPID = runAnvil.processID - # We wait the daemon to exit + debug "Stopping Anvil daemon", anvilPID = anvilPID + try: - # We terminate Anvil daemon by sending a SIGTERM signal to the runAnvil PID to trigger RPC server termination and clean-up - kill(runAnvil) - debug "Sent SIGTERM to Anvil", anvilPID = anvilPID - except: - error "Anvil daemon termination failed: ", err = getCurrentExceptionMsg() + # Send termination signals + when not defined(windows): + discard execCmdEx(fmt"kill -TERM {anvilPID}") + discard execCmdEx(fmt"kill -9 {anvilPID}") + else: + discard execCmdEx(fmt"taskkill /F /PID {anvilPID}") + + # Close Process object to release resources + close(runAnvil) + debug "Anvil daemon stopped", anvilPID = anvilPID + except Exception as e: + debug "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg proc setupOnchainGroupManager*( ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 @@ -259,12 +556,10 @@ proc setupOnchainGroupManager*( let rlnInstance = rlnInstanceRes.get() - let contractAddress = await uploadRLNContract(ethClientUrl) # connect to the eth client let web3 = await newWeb3(ethClientUrl) - let accounts = await web3.provider.eth_accounts() - web3.defaultAccount = accounts[0] + web3.defaultAccount = accounts[1] let (privateKey, acc) = createEthAccount(web3) @@ -274,6 +569,32 @@ proc setupOnchainGroupManager*( web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256) ) + let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy test token contract: " & $error + return + + # mint the token from the generated account + discard await sendMintCall( + web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256) + ) + + let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy RLN contract: " & $error + return + + # If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens + let tokenApprovalResult = await approveTokenAllowanceAndVerify( + web3, + acc, # owner + privateKey, + testTokenAddress, # ERC20 token address + contractAddress, # spender - the proxy contract that will 
spend the tokens + ethToWei(200.u256), + some(0.u256), # expected allowance before approval + ) + + assert tokenApprovalResult.isOk, tokenApprovalResult.error() + let manager = OnchainGroupManager( ethClientUrls: @[ethClientUrl], ethContractAddress: $contractAddress, diff --git a/tests/waku_rln_relay/utils_static.nim b/tests/waku_rln_relay/utils_static.nim index 8f564beb1..cbf3e9253 100644 --- a/tests/waku_rln_relay/utils_static.nim +++ b/tests/waku_rln_relay/utils_static.nim @@ -3,7 +3,6 @@ import std/[sequtils, tempfiles], stew/byteutils, - stew/shims/net as stewNet, chronos, chronicles, libp2p/switch, diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index 1d5e4dcfd..b20309079 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -2,7 +2,6 @@ import std/sequtils, - stew/shims/net as stewNet, testutils/unittests, chronicles, chronos, diff --git a/tests/waku_store_sync/sync_utils.nim b/tests/waku_store_sync/sync_utils.nim index e7fd82b57..d5cb601a2 100644 --- a/tests/waku_store_sync/sync_utils.nim +++ b/tests/waku_store_sync/sync_utils.nim @@ -26,6 +26,7 @@ proc newTestWakuRecon*( wantsTx: AsyncQueue[PeerId], needsTx: AsyncQueue[(PeerId, Fingerprint)], cluster: uint16 = 1, + syncRange: timer.Duration = DefaultSyncRange, shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7], ): Future[SyncReconciliation] {.async.} = let peerManager = PeerManager.new(switch) @@ -36,6 +37,7 @@ proc newTestWakuRecon*( peerManager = peerManager, wakuArchive = nil, relayJitter = 0.seconds, + syncRange = syncRange, idsRx = idsRx, localWantsTx = wantsTx, remoteNeedsTx = needsTx, diff --git a/tests/waku_store_sync/test_protocol.nim b/tests/waku_store_sync/test_protocol.nim index efdd6a885..c606934cf 100644 --- a/tests/waku_store_sync/test_protocol.nim +++ b/tests/waku_store_sync/test_protocol.nim @@ -1,8 +1,12 @@ {.used.} import - std/[options, sets, random, math], testutils/unittests, chronos, libp2p/crypto/crypto - + std/[options, sets, random, math, algorithm], + testutils/unittests, + chronos, + libp2p/crypto/crypto +import chronos, chronos/asyncsync +import nimcrypto import ../../waku/[ node/peer_manager, @@ -21,6 +25,15 @@ import ../waku_archive/archive_utils, ./sync_utils +proc collectDiffs*( + chan: var Channel[SyncID], diffCount: int +): HashSet[WakuMessageHash] = + var received: HashSet[WakuMessageHash] + while received.len < diffCount: + let sid = chan.recv() # synchronous receive + received.incl sid.hash + result = received + suite "Waku Sync: reconciliation": var serverSwitch {.threadvar.}: Switch var clientSwitch {.threadvar.}: Switch @@ -234,53 +247,376 @@ suite "Waku Sync: reconciliation": remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == true asyncTest "sync 2 nodes 10K msgs 1K diffs": - let msgCount = 10_000 - var diffCount = 1_000 + const + msgCount = 200_000 # total messages on the server + diffCount = 100 # messages initially missing on the client - var diffMsgHashes: HashSet[WakuMessageHash] - var randIndexes: HashSet[int] + ## ── choose which messages will be absent from the client ───────────── + var missingIdx: HashSet[int] + while missingIdx.len < diffCount: + missingIdx.incl rand(0 ..< msgCount) - # Diffs - for i in 0 ..< diffCount: - var randInt = rand(0 ..< msgCount) - - #make sure we actually have the right number of diffs - while randInt in randIndexes: - randInt = rand(0 ..< msgCount) - - randIndexes.incl(randInt) - - # sync window is 1 hour, spread msg equally in that 
time - let timeSlice = calculateTimeRange() - let timeWindow = int64(timeSlice.b) - int64(timeSlice.a) - let (part, _) = divmod(timeWindow, 100_000) - - var timestamp = timeSlice.a + ## ── generate messages and pre-load the two reconcilers ─────────────── + let slice = calculateTimeRange() # 1-hour window + let step = (int64(slice.b) - int64(slice.a)) div msgCount + var ts = slice.a for i in 0 ..< msgCount: let - msg = fakeWakuMessage(ts = timestamp, contentTopic = DefaultContentTopic) - hash = computeMessageHash(DefaultPubsubTopic, msg) + msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + h = computeMessageHash(DefaultPubsubTopic, msg) - server.messageIngress(hash, msg) + server.messageIngress(h, msg) # every msg is on the server + if i notin missingIdx: + client.messageIngress(h, msg) # all but 100 are on the client + ts += Timestamp(step) - if i in randIndexes: - diffMsgHashes.incl(hash) + ## ── sanity before we start the round ───────────────────────────────── + check remoteNeeds.len == 0 + + ## ── launch reconciliation from the client towards the server ───────── + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + ## ── verify that ≈100 diffs were queued (allow 10 % slack) ──────────── + check remoteNeeds.len >= 90 # ≈ 100 × 0.9 + + asyncTest "sync 2 nodes 400K msgs 100k diffs": + const + msgCount = 400_000 + diffCount = 100_000 + tol = 1000 + + var diffMsgHashes: HashSet[WakuMessageHash] + var missingIdx: HashSet[int] + while missingIdx.len < diffCount: + missingIdx.incl rand(0 ..< msgCount) + + let slice = calculateTimeRange() + let step = (int64(slice.b) - int64(slice.a)) div msgCount + var ts = slice.a + + for i in 0 ..< msgCount: + let + msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + h = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(h, msg) + if i notin missingIdx: + client.messageIngress(h, msg) else: - client.messageIngress(hash, msg) + diffMsgHashes.incl h - timestamp += Timestamp(part) - continue + ts += Timestamp(step) - check: - remoteNeeds.len == 0 + check remoteNeeds.len == 0 let res = await client.storeSynchronization(some(serverPeerInfo)) assert res.isOk(), $res.error - # timimg issue make it hard to match exact numbers - check: - remoteNeeds.len > 900 + check remoteNeeds.len >= diffCount - tol and remoteNeeds.len < diffCount + let (_, deliveredHash) = await remoteNeeds.get() + check deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes 100 msgs 20 diff – 1-second window": + const + msgCount = 100 + diffCount = 20 + + var missingIdx: seq[int] = @[] + while missingIdx.len < diffCount: + let n = rand(0 ..< msgCount) + if n notin missingIdx: + missingIdx.add n + + var diffMsgHashes: HashSet[WakuMessageHash] + + let sliceEnd = now() + let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64 + let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount + var ts = sliceStart + + for i in 0 ..< msgCount: + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, msg) + + if i in missingIdx: + diffMsgHashes.incl hash + else: + client.messageIngress(hash, msg) + + ts += Timestamp(step) + + check remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffCount + + for _ in 0 ..< diffCount: + let (_, deliveredHash) = await remoteNeeds.get() + check 
deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes 500k msgs 300k diff – stress window": + const + msgCount = 500_000 + diffCount = 300_000 + + randomize() + var allIdx = newSeq[int](msgCount) + for i in 0 ..< msgCount: + allIdx[i] = i + shuffle(allIdx) + + let missingIdx = allIdx[0 ..< diffCount] + var missingSet: HashSet[int] + for idx in missingIdx: + missingSet.incl idx + + var diffMsgHashes: HashSet[WakuMessageHash] + + let sliceEnd = now() + let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64 + let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount + var ts = sliceStart + + for i in 0 ..< msgCount: + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, msg) + + if i in missingSet: + diffMsgHashes.incl hash + else: + client.messageIngress(hash, msg) + + ts += Timestamp(step) + + check remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffCount + + for _ in 0 ..< 1000: + let (_, deliveredHash) = await remoteNeeds.get() + check deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes, 40 msgs: 20 in-window diff, 20 out-window ignored": + const + diffInWin = 20 + diffOutWin = 20 + stepOutNs = 100_000_000'u64 + outOffsetNs = 2_300_000_000'u64 # for 20 mesg they sent 2 seconds earlier + + randomize() + + let nowNs = getNowInNanosecondTime() + let sliceStart = Timestamp(uint64(nowNs) - 700_000_000'u64) + let sliceEnd = nowNs + let stepIn = (sliceEnd.int64 - sliceStart.int64) div diffInWin + + let oldStart = Timestamp(uint64(sliceStart) - outOffsetNs) + let stepOut = Timestamp(stepOutNs) + + var inWinHashes, outWinHashes: HashSet[WakuMessageHash] + + var ts = sliceStart + for _ in 0 ..< diffInWin: + let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, msg) + inWinHashes.incl hash + ts += Timestamp(stepIn) + + ts = oldStart + for _ in 0 ..< diffOutWin: + let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, msg) + outWinHashes.incl hash + ts += Timestamp(stepOut) + + check remoteNeeds.len == 0 + + let oneSec = timer.seconds(1) + + server = await newTestWakuRecon( + serverSwitch, idsChannel, localWants, remoteNeeds, syncRange = oneSec + ) + + client = await newTestWakuRecon( + clientSwitch, idsChannel, localWants, remoteNeeds, syncRange = oneSec + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffInWin + + for _ in 0 ..< diffInWin: + let (_, deliveredHashes) = await remoteNeeds.get() + check deliveredHashes in inWinHashes + check deliveredHashes notin outWinHashes + + asyncTest "hash-fingerprint collision, same timestamp – stable sort": + let ts = Timestamp(getNowInNanosecondTime()) + + var msg1 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + var msg2 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + msg2.payload[0] = msg2.payload[0] xor 0x01 + var h1 = computeMessageHash(DefaultPubsubTopic, msg1) + var h2 = computeMessageHash(DefaultPubsubTopic, msg2) + + for i in 0 ..< 8: + h2[i] = h1[i] + for i in 0 ..< 8: + check h1[i] == h2[i] + + check h1 != h2 + + 
server.messageIngress(h1, msg1) + client.messageIngress(h2, msg2) + + check remoteNeeds.len == 0 + server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds) + + client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + + var vec = @[SyncID(time: ts, hash: h2), SyncID(time: ts, hash: h1)] + vec.shuffle() + vec.sort() + + let hFirst = vec[0].hash + let hSecond = vec[1].hash + check vec[0].time == ts and vec[1].time == ts + + asyncTest "malformed message-ID is ignored during reconciliation": + let nowTs = Timestamp(getNowInNanosecondTime()) + + let goodMsg = fakeWakuMessage(ts = nowTs, contentTopic = DefaultContentTopic) + var goodHash = computeMessageHash(DefaultPubsubTopic, goodMsg) + + var badHash: WakuMessageHash + for i in 0 ..< 32: + badHash[i] = 0'u8 + let badMsg = fakeWakuMessage(ts = Timestamp(0), contentTopic = DefaultContentTopic) + + server.messageIngress(goodHash, goodMsg) + server.messageIngress(badHash, badMsg) + + check remoteNeeds.len == 0 + + server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds) + client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == goodHash + check neededHash != badHash + + asyncTest "malformed ID: future-timestamp msg is ignored": + let nowNs = getNowInNanosecondTime() + let tsNow = Timestamp(nowNs) + + let goodMsg = fakeWakuMessage(ts = tsNow, contentTopic = DefaultContentTopic) + let goodHash = computeMessageHash(DefaultPubsubTopic, goodMsg) + + const tenYearsSec = 10 * 365 * 24 * 60 * 60 + let futureNs = nowNs + int64(tenYearsSec) * 1_000_000_000'i64 + let badTs = Timestamp(futureNs.uint64) + + let badMsg = fakeWakuMessage(ts = badTs, contentTopic = DefaultContentTopic) + let badHash = computeMessageHash(DefaultPubsubTopic, badMsg) + + server.messageIngress(goodHash, goodMsg) + server.messageIngress(badHash, badMsg) + + check remoteNeeds.len == 0 + server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds) + client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == goodHash + check neededHash != badHash + + asyncTest "duplicate ID is queued only once": + let ts = Timestamp(getNowInNanosecondTime()) + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let h = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(h, msg) + server.messageIngress(h, msg) + check remoteNeeds.len == 0 + + server = await newTestWakuRecon(serverSwitch, idsChannel, localWants, remoteNeeds) + client = await newTestWakuRecon(clientSwitch, idsChannel, localWants, remoteNeeds) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == h + + asyncTest 
"sync terminates immediately when no diffs exist": + let ts = Timestamp(getNowInNanosecondTime()) + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(hash, msg) + client.messageIngress(hash, msg) + + let idsQ = newAsyncQueue[SyncID]() + let wantsQ = newAsyncQueue[PeerId]() + let needsQ = newAsyncQueue[(PeerId, Fingerprint)]() + + server = await newTestWakuRecon(serverSwitch, idsQ, wantsQ, needsQ) + client = await newTestWakuRecon(clientSwitch, idsQ, wantsQ, needsQ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check needsQ.len == 0 suite "Waku Sync: transfer": var @@ -396,3 +732,40 @@ suite "Waku Sync: transfer": check: response.messages.len > 0 + + asyncTest "Check the exact missing messages are received": + let timeSlice = calculateTimeRange() + let timeWindow = int64(timeSlice.b) - int64(timeSlice.a) + let (part, _) = divmod(timeWindow, 3) + + var ts = timeSlice.a + + let msgA = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + ts += Timestamp(part) + let msgB = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + ts += Timestamp(part) + let msgC = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + + let hA = computeMessageHash(DefaultPubsubTopic, msgA) + let hB = computeMessageHash(DefaultPubsubTopic, msgB) + let hC = computeMessageHash(DefaultPubsubTopic, msgC) + + discard serverDriver.put(DefaultPubsubTopic, @[msgA, msgB, msgC]) + discard clientDriver.put(DefaultPubsubTopic, @[msgA]) + + await serverRemoteNeeds.put((clientPeerInfo.peerId, hB)) + await serverRemoteNeeds.put((clientPeerInfo.peerId, hC)) + await clientLocalWants.put(serverPeerInfo.peerId) + + await sleepAsync(1.seconds) + check serverRemoteNeeds.len == 0 + + let sid1 = await clientIds.get() + let sid2 = await clientIds.get() + + let received = [sid1.hash, sid2.hash].toHashSet() + let expected = [hB, hC].toHashSet + + check received == expected + + check clientIds.len == 0 diff --git a/tests/waku_store_sync/test_range_split.nim b/tests/waku_store_sync/test_range_split.nim new file mode 100644 index 000000000..50ebc39fd --- /dev/null +++ b/tests/waku_store_sync/test_range_split.nim @@ -0,0 +1,242 @@ +import unittest, nimcrypto, std/sequtils, results +import ../../waku/waku_store_sync/[reconciliation, common] +import ../../waku/waku_store_sync/storage/seq_storage +import ../../waku/waku_core/message/digest + +proc toDigest(s: string): WakuMessageHash = + let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, (s.len - 1))) + var res: WakuMessageHash + for i in 0 .. 
31: + res[i] = d.data[i] + return res + +proc `..`(a, b: SyncID): Slice[SyncID] = + Slice[SyncID](a: a, b: b) + +suite "Waku Sync – reconciliation": + test "fan-out: eight fingerprint sub-ranges for large slice": + const N = 2_048 + const mismatchI = 70 + + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var baseHashMismatch: WakuMessageHash + var remoteHashMismatch: WakuMessageHash + + for i in 0 ..< N: + let ts = 1000 + i + let hashLocal = toDigest("msg" & $i) + local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == mismatchI: + baseHashMismatch = hashLocal + remoteHashMismatch = toDigest("msg" & $i & "_x") + hashRemote = remoteHashMismatch + remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr: + assert false, "failed to insert hash: " & $error + + var z: WakuMessageHash + let whole = SyncID(time: 1000, hash: z) .. SyncID(time: 1000 + N - 1, hash: z) + + check local.computeFingerprint(whole) != remote.computeFingerprint(whole) + + let remoteFp = remote.computeFingerprint(whole) + let payload = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(whole, RangeType.Fingerprint)], + fingerprints: @[remoteFp], + itemSets: @[], + ) + + var toSend, toRecv: seq[WakuMessageHash] + let reply = local.processPayload(payload, toSend, toRecv) + + check reply.ranges.len == 8 + check reply.ranges.allIt(it[1] == RangeType.Fingerprint) + check reply.itemSets.len == 0 + check reply.fingerprints.len == 8 + + let mismTime = 1000 + mismatchI + var covered = false + for (slc, _) in reply.ranges: + if mismTime >= slc.a.time and mismTime <= slc.b.time: + covered = true + break + check covered + + check toSend.len == 0 + check toRecv.len == 0 + + test "splits mismatched fingerprint into two sub-ranges then item-set": + const threshold = 4 + const partitions = 2 + + let local = SeqStorage.new(@[], threshold = threshold, partitions = partitions) + let remote = SeqStorage.new(@[], threshold = threshold, partitions = partitions) + + var mismatchHash: WakuMessageHash + for i in 0 ..< 8: + let t = 1000 + i + let baseHash = toDigest("msg" & $i) + + var localHash = baseHash + var remoteHash = baseHash + + if i == 3: + mismatchHash = toDigest("msg" & $i & "_x") + localHash = mismatchHash + + discard local.insert (SyncID(time: t, hash: localHash)) + discard remote.insert(SyncID(time: t, hash: remoteHash)) + + var zeroHash: WakuMessageHash + let wholeRange = + SyncID(time: 1000, hash: zeroHash) .. 
SyncID(time: 1007, hash: zeroHash) + + var toSend, toRecv: seq[WakuMessageHash] + + let payload = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(wholeRange, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(wholeRange)], + itemSets: @[], + ) + + let reply = local.processPayload(payload, toSend, toRecv) + + check reply.ranges.len == partitions + check reply.itemSets.len == partitions + + check reply.itemSets.anyIt( + it.elements.anyIt(it.hash == mismatchHash and it.time == 1003) + ) + + test "second round when N =2048 & local ": + const N = 2_048 + const mismatchI = 70 + + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var baseHashMismatch, remoteHashMismatch: WakuMessageHash + + for i in 0 ..< N: + let ts = 1000 + i + let hashLocal = toDigest("msg" & $i) + local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == mismatchI: + baseHashMismatch = hashLocal + remoteHashMismatch = toDigest("msg" & $i & "_x") + hashRemote = remoteHashMismatch + remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr: + assert false, "failed to insert hash: " & $error + + var zero: WakuMessageHash + let sliceWhole = + SyncID(time: 1000, hash: zero) .. SyncID(time: 1000 + N - 1, hash: zero) + check local.computeFingerprint(sliceWhole) != remote.computeFingerprint(sliceWhole) + + let payload1 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(sliceWhole, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(sliceWhole)], + itemSets: @[], + ) + + var toSend, toRecv: seq[WakuMessageHash] + let reply1 = local.processPayload(payload1, toSend, toRecv) + + check reply1.ranges.len == 8 + check reply1.ranges.allIt(it[1] == RangeType.Fingerprint) + + let mismTime = 1000 + mismatchI + var subSlice: Slice[SyncID] + for (sl, _) in reply1.ranges: + if mismTime >= sl.a.time and mismTime <= sl.b.time: + subSlice = sl + break + check subSlice.a.time != 0 + + let payload2 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(subSlice, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(subSlice)], + itemSets: @[], + ) + + var toSend2, toRecv2: seq[WakuMessageHash] + let reply2 = local.processPayload(payload2, toSend2, toRecv2) + + check reply2.ranges.len == 8 + check reply2.ranges.allIt(it[1] == RangeType.ItemSet) + check reply2.itemSets.len == 8 + + var matchCount = 0 + for iset in reply2.itemSets: + if iset.elements.anyIt(it.time == mismTime and it.hash == baseHashMismatch): + inc matchCount + check not iset.elements.anyIt(it.hash == remoteHashMismatch) + check matchCount == 1 + + check toSend2.len == 0 + check toRecv2.len == 0 + + test "second-round payload remote": + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var baseHash: WakuMessageHash + var alteredHash: WakuMessageHash + + for i in 0 ..< 8: + let ts = 1000 + i + let hashLocal = toDigest("msg" & $i) + local.insert(SyncID(time: ts, hash: hashLocal)).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == 3: + baseHash = hashLocal + alteredHash = toDigest("msg" & $i & "_x") + hashRemote = alteredHash + + remote.insert(SyncID(time: ts, hash: hashRemote)).isOkOr: + assert false, "failed to insert hash: " & $error + + var zero: WakuMessageHash + let slice = SyncID(time: 1000, hash: zero) .. 
SyncID(time: 1007, hash: zero) + + check local.computeFingerprint(slice) != remote.computeFingerprint(slice) + + var toSend1, toRecv1: seq[WakuMessageHash] + let pay1 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(slice, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(slice)], + itemSets: @[], + ) + let rep1 = local.processPayload(pay1, toSend1, toRecv1) + + check rep1.ranges.len == 1 + check rep1.ranges[0][1] == RangeType.ItemSet + check toSend1.len == 0 + check toRecv1.len == 0 + + var toSend2, toRecv2: seq[WakuMessageHash] + discard remote.processPayload(rep1, toSend2, toRecv2) + + check toSend2.len == 1 + check toSend2[0] == alteredHash + check toRecv2.len == 1 + check toRecv2[0] == baseHash diff --git a/tests/waku_store_sync/test_state_transition.nim b/tests/waku_store_sync/test_state_transition.nim new file mode 100644 index 000000000..732a577a9 --- /dev/null +++ b/tests/waku_store_sync/test_state_transition.nim @@ -0,0 +1,244 @@ +import unittest, nimcrypto, std/sequtils +import ../../waku/waku_store_sync/[reconciliation, common] +import ../../waku/waku_store_sync/storage/seq_storage +import ../../waku/waku_core/message/digest + +proc toDigest*(s: string): WakuMessageHash = + let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, s.high)) + for i in 0 .. 31: + result[i] = d.data[i] + +proc `..`(a, b: SyncID): Slice[SyncID] = + Slice[SyncID](a: a, b: b) + +suite "Waku Sync – reconciliation": + test "Fingerprint → ItemSet → zero (default thresholds)": + const N = 2_000 + const idx = 137 + + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard local.insert(SyncID(time: ts, hash: h)) + var hr = h + if i == idx: + baseH = h + altH = toDigest("msg" & $i & "x") + hr = altH + discard remote.insert(SyncID(time: ts, hash: hr)) + + var z: WakuMessageHash + let whole = SyncID(time: 1000, hash: z) .. 
SyncID(time: 1000 + N - 1, hash: z) + + var s1, r1: seq[WakuMessageHash] + let p1 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(whole, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(whole)], + itemSets: @[], + ) + let rep1 = local.processPayload(p1, s1, r1) + check rep1.ranges.len == 8 + check rep1.ranges.allIt(it[1] == RangeType.Fingerprint) + + let mismT = 1000 + idx + let sub = + rep1.ranges.filterIt(mismT >= it[0].a.time and mismT <= it[0].b.time)[0][0] + + var s2, r2: seq[WakuMessageHash] + let p2 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(sub, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(sub)], + itemSets: @[], + ) + let rep2 = local.processPayload(p2, s2, r2) + check rep2.ranges.len == 8 + check rep2.ranges.allIt(it[1] == RangeType.ItemSet) + + var s3, r3: seq[WakuMessageHash] + discard remote.processPayload(rep2, s3, r3) + check s3.len == 1 and s3[0] == altH + check r3.len == 1 and r3[0] == baseH + + discard local.insert(SyncID(time: mismT, hash: altH)) + discard remote.insert(SyncID(time: mismT, hash: baseH)) + + var s4, r4: seq[WakuMessageHash] + let p3 = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(sub, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(sub)], + itemSets: @[], + ) + let rep3 = local.processPayload(p3, s4, r4) + check rep3.ranges.len == 0 + check s4.len == 0 and r4.len == 0 + + test "test 2 ranges includes 1 skip": + const N = 120 + const pivot = 60 + + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var diffHash: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard local.insert(SyncID(time: ts, hash: h)) + var hr: WakuMessageHash + if i >= pivot: + diffHash = toDigest("msg" & $i & "_x") + hr = diffHash + else: + hr = h + + discard remote.insert(SyncID(time: ts, hash: hr)) + + var z: WakuMessageHash + let sliceA = SyncID(time: 1000, hash: z) .. SyncID(time: 1059, hash: z) + let sliceB = SyncID(time: 1060, hash: z) .. SyncID(time: 1119, hash: z) + + var s, r: seq[WakuMessageHash] + let payload = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(sliceA, RangeType.Fingerprint), (sliceB, RangeType.Fingerprint)], + fingerprints: + @[remote.computeFingerprint(sliceA), remote.computeFingerprint(sliceB)], + itemSets: @[], + ) + let reply = local.processPayload(payload, s, r) + + check reply.ranges.len == 2 + check reply.ranges[0][1] == RangeType.Skip + check reply.ranges[1][1] == RangeType.ItemSet + check reply.itemSets.len == 1 + check not reply.itemSets[0].elements.anyIt(it.hash == diffHash) + + test "custom threshold (50) → eight ItemSets first round": + const N = 300 + const idx = 123 + + let local = SeqStorage.new(capacity = N, threshold = 50, partitions = 8) + let remote = SeqStorage.new(capacity = N, threshold = 50, partitions = 8) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard local.insert(SyncID(time: ts, hash: h)) + var hr = h + if i == idx: + baseH = h + altH = toDigest("msg" & $i & "_x") + hr = altH + discard remote.insert(SyncID(time: ts, hash: hr)) + + var z: WakuMessageHash + let slice = SyncID(time: 1000, hash: z) .. 
SyncID(time: 1000 + N - 1, hash: z) + + var toS, toR: seq[WakuMessageHash] + let p = RangesData( + cluster: 0, + shards: @[0], + ranges: @[(slice, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(slice)], + itemSets: @[], + ) + let reply = local.processPayload(p, toS, toR) + + check reply.ranges.len == 8 + check reply.ranges.allIt(it[1] == RangeType.ItemSet) + check reply.itemSets.len == 8 + + let mismT = 1000 + idx + var hit = 0 + for ist in reply.itemSets: + if ist.elements.anyIt(it.time == mismT and it.hash == baseH): + inc hit + check hit == 1 + + test "test N=80K,3FP,2IS,SKIP": + const N = 80_000 + const bad = N - 10 + + let local = SeqStorage.new(@[]) + let remote = SeqStorage.new(@[]) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard local.insert(SyncID(time: ts, hash: h)) + + let hr = + if i == bad: + baseH = h + altH = toDigest("msg" & $i & "_x") + altH + else: + h + discard remote.insert(SyncID(time: ts, hash: hr)) + + var slice = + SyncID(time: 1000, hash: EmptyFingerprint) .. + SyncID(time: 1000 + N - 1, hash: FullFingerprint) + + proc fpReply(s: Slice[SyncID], sendQ, recvQ: var seq[WakuMessageHash]): RangesData = + local.processPayload( + RangesData( + cluster: 0, + shards: @[0], + ranges: @[(s, RangeType.Fingerprint)], + fingerprints: @[remote.computeFingerprint(s)], + itemSets: @[], + ), + sendQ, + recvQ, + ) + + var tmpS, tmpR: seq[WakuMessageHash] + + for r in 1 .. 3: + let rep = fpReply(slice, tmpS, tmpR) + check rep.ranges.len == 8 + check rep.ranges.allIt(it[1] == RangeType.Fingerprint) + for (sl, _) in rep.ranges: + if local.computeFingerprint(sl) != remote.computeFingerprint(sl): + slice = sl + break + + let rep4 = fpReply(slice, tmpS, tmpR) + check rep4.ranges.len == 8 + check rep4.ranges.allIt(it[1] == RangeType.ItemSet) + for (sl, _) in rep4.ranges: + if sl.a.time <= 1000 + bad and sl.b.time >= 1000 + bad: + slice = sl + break + + var send5, recv5: seq[WakuMessageHash] + let rep5 = fpReply(slice, send5, recv5) + check rep5.ranges.len == 1 + check rep5.ranges[0][1] == RangeType.ItemSet + + var qSend, qRecv: seq[WakuMessageHash] + discard remote.processPayload(rep5, qSend, qRecv) + check qSend.len == 1 and qSend[0] == altH + check qRecv.len == 1 and qRecv[0] == baseH + + discard local.insert(SyncID(time: slice.a.time, hash: altH)) + discard remote.insert(SyncID(time: slice.a.time, hash: baseH)) + + var send6, recv6: seq[WakuMessageHash] + let rep6 = fpReply(slice, send6, recv6) + check rep6.ranges.len == 0 + check send6.len == 0 and recv6.len == 0 diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim index 2ee933e3f..2d62d4956 100644 --- a/tests/wakunode2/test_app.nim +++ b/tests/wakunode2/test_app.nim @@ -1,7 +1,6 @@ {.used.} import - stew/shims/net, testutils/unittests, chronicles, chronos, @@ -66,7 +65,7 @@ suite "Wakunode2 - Waku initialization": test "app properly handles dynamic port configuration": ## Given var conf = defaultTestWakuConf() - conf.networkConf.p2pTcpPort = Port(0) + conf.endpointConf.p2pTcpPort = Port(0) ## When var waku = Waku.new(conf).valueOr: diff --git a/tests/wakunode2/test_validators.nim b/tests/wakunode2/test_validators.nim index 23a3e5d6f..b0a8dd8fb 100644 --- a/tests/wakunode2/test_validators.nim +++ b/tests/wakunode2/test_validators.nim @@ -2,7 +2,6 @@ import std/[sequtils, sysrand, math], - stew/shims/net as stewNet, testutils/unittests, chronos, libp2p/crypto/crypto, @@ -74,7 +73,9 @@ suite "WakuNode2 - Validators": # Subscribe 
all nodes to the same topic/handler for node in nodes: - discard node.wakuRelay.subscribe($spamProtectedShard, handler) + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) # Each node publishes 10 signed messages @@ -164,7 +165,9 @@ suite "WakuNode2 - Validators": # Subscribe all nodes to the same topic/handler for node in nodes: - discard node.wakuRelay.subscribe($spamProtectedShard, handler) + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) # Each node sends 5 messages, signed but with a non-whitelisted key (total = 25) @@ -292,7 +295,8 @@ suite "WakuNode2 - Validators": # Subscribe all nodes to the same topic/handler for node in nodes: - discard node.wakuRelay.subscribe($spamProtectedShard, handler) + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error await sleepAsync(500.millis) # Add signed message validator to all nodes. They will only route signed messages diff --git a/tests/wakunode_rest/test_all.nim b/tests/wakunode_rest/test_all.nim index 6e34b6fdd..4071e635b 100644 --- a/tests/wakunode_rest/test_all.nim +++ b/tests/wakunode_rest/test_all.nim @@ -1,14 +1,15 @@ {.used.} import - ./test_rest_debug_serdes, + ./test_rest_admin, + ./test_rest_cors, ./test_rest_debug, + ./test_rest_debug_serdes, ./test_rest_filter, - ./test_rest_lightpush_legacy, ./test_rest_health, + ./test_rest_lightpush, + ./test_rest_lightpush_legacy, ./test_rest_relay_serdes, ./test_rest_relay, ./test_rest_serdes, - ./test_rest_store, - ./test_rest_admin, - ./test_rest_cors + ./test_rest_store diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index a3546f1f8..c928140e1 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -1,7 +1,7 @@ {.used.} import - std/[sequtils, strformat, net], + std/[sequtils, net], testutils/unittests, presto, presto/client as presto_client, @@ -42,15 +42,36 @@ suite "Waku v2 Rest API - Admin": node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60602)) node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604)) + let clusterId = 1.uint16 + node1.mountMetadata(clusterId).isOkOr: + assert false, "Failed to mount metadata: " & $error + node2.mountMetadata(clusterId).isOkOr: + assert false, "Failed to mount metadata: " & $error + node3.mountMetadata(clusterId).isOkOr: + assert false, "Failed to mount metadata: " & $error + await allFutures(node1.start(), node2.start(), node3.start()) - let shards = @[RelayShard(clusterId: 1, shardId: 0)] await allFutures( - node1.mountRelay(shards = shards), - node2.mountRelay(shards = shards), - node3.mountRelay(shards = shards), + node1.mountRelay(), + node2.mountRelay(), + node3.mountRelay(), node3.mountPeerExchange(), ) + # The three nodes should be subscribed to the same shard + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + let shard = RelayShard(clusterId: clusterId, shardId: 0) + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to 
subscribe to topic: " & $error + node3.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + peerInfo1 = node1.switch.peerInfo peerInfo2 = node2.switch.peerInfo peerInfo3 = node3.switch.peerInfo diff --git a/tests/wakunode_rest/test_rest_cors.nim b/tests/wakunode_rest/test_rest_cors.nim index 7d29711b1..58e70aa25 100644 --- a/tests/wakunode_rest/test_rest_cors.nim +++ b/tests/wakunode_rest/test_rest_cors.nim @@ -1,7 +1,6 @@ {.used.} import - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, diff --git a/tests/wakunode_rest/test_rest_debug.nim b/tests/wakunode_rest/test_rest_debug.nim index 3129b3544..9add57cbe 100644 --- a/tests/wakunode_rest/test_rest_debug.nim +++ b/tests/wakunode_rest/test_rest_debug.nim @@ -1,7 +1,6 @@ {.used.} import - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, diff --git a/tests/wakunode_rest/test_rest_filter.nim b/tests/wakunode_rest/test_rest_filter.nim index 556b6b52e..f8dbf429a 100644 --- a/tests/wakunode_rest/test_rest_filter.nim +++ b/tests/wakunode_rest/test_rest_filter.nim @@ -3,7 +3,6 @@ import chronos/timer, stew/byteutils, - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, @@ -279,8 +278,16 @@ suite "Waku v2 Rest API - Filter V2": restFilterTest = await RestFilterTest.init() subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic) - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: assert false, "Failed to subscribe to topic: " & $error # When @@ -326,7 +333,14 @@ suite "Waku v2 Rest API - Filter V2": # setup filter service and client node let restFilterTest = await RestFilterTest.init() let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: assert false, "Failed to subscribe to topic: " & $error let requestBody = FilterSubscribeRequest( @@ -398,7 +412,14 @@ suite "Waku v2 Rest API - Filter V2": # setup filter service and client node let restFilterTest = await RestFilterTest.init() let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId - restFilterTest.serviceNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: assert false, "Failed to subscribe to topic: " & $error let requestBody = FilterSubscribeRequest( diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index 3c7c94e87..964e09c5b 100644 --- a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -2,7 +2,6 @@ import 
std/tempfiles, - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, @@ -40,7 +39,7 @@ suite "Waku v2 REST API - health": asyncTest "Get node health info - GET /health": # Given let node = testWakuNode() - let healthMonitor = WakuNodeHealthMonitor() + let healthMonitor = NodeHealthMonitor() await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" @@ -75,7 +74,11 @@ suite "Waku v2 REST API - health": treePath: genTempPath("rln_tree", "wakunode"), ) ) - healthMonitor.setNode(node) + + node.mountLightPushClient() + await node.mountFilterClient() + + healthMonitor.setNodeToHealthMonitor(node) healthMonitor.setOverallHealth(HealthStatus.READY) # When response = await client.healthCheck() @@ -85,9 +88,40 @@ suite "Waku v2 REST API - health": response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.nodeHealth == HealthStatus.READY - response.data.protocolsHealth.len() == 1 - response.data.protocolsHealth[0].protocol == "Rln Relay" - response.data.protocolsHealth[0].health == HealthStatus.READY + response.data.protocolsHealth.len() == 14 + response.data.protocolsHealth[0].protocol == "Relay" + response.data.protocolsHealth[0].health == HealthStatus.NOT_READY + response.data.protocolsHealth[0].desc == some("No connected peers") + response.data.protocolsHealth[1].protocol == "Rln Relay" + response.data.protocolsHealth[1].health == HealthStatus.READY + response.data.protocolsHealth[2].protocol == "Lightpush" + response.data.protocolsHealth[2].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[3].protocol == "Legacy Lightpush" + response.data.protocolsHealth[3].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[4].protocol == "Filter" + response.data.protocolsHealth[4].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[5].protocol == "Store" + response.data.protocolsHealth[5].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[6].protocol == "Legacy Store" + response.data.protocolsHealth[6].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[7].protocol == "Peer Exchange" + response.data.protocolsHealth[7].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[8].protocol == "Rendezvous" + response.data.protocolsHealth[8].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[9].protocol == "Lightpush Client" + response.data.protocolsHealth[9].health == HealthStatus.NOT_READY + response.data.protocolsHealth[9].desc == + some("No Lightpush service peer available yet") + response.data.protocolsHealth[10].protocol == "Legacy Lightpush Client" + response.data.protocolsHealth[10].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[11].protocol == "Store Client" + response.data.protocolsHealth[11].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[12].protocol == "Legacy Store Client" + response.data.protocolsHealth[12].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[13].protocol == "Filter Client" + response.data.protocolsHealth[13].health == HealthStatus.NOT_READY + response.data.protocolsHealth[13].desc == + some("No Filter service peer available yet") await restServer.stop() await restServer.closeWait() diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim index 72e309a13..b09c72ee3 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -3,7 +3,6 @@ import std/sequtils, 
stew/byteutils, - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, @@ -129,13 +128,18 @@ suite "Waku v2 Rest API - lightpush": # Given let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + restLightPushTest.consumerNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to relay: " & $error restLightPushTest.serviceNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to relay: " & $error require: @@ -163,9 +167,13 @@ suite "Waku v2 Rest API - lightpush": asyncTest "Push message bad-request": # Given let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) restLightPushTest.serviceNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to relay: " & $error require: @@ -221,14 +229,18 @@ suite "Waku v2 Rest API - lightpush": let budgetCap = 3 let tokenPeriod = 500.millis let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod)) + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) restLightPushTest.consumerNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to relay: " & $error restLightPushTest.serviceNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to relay: " & $error require: diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim index e1d6dca30..fea51554b 100644 --- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -3,7 +3,6 @@ import std/sequtils, stew/byteutils, - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, @@ -123,14 +122,18 @@ suite "Waku v2 Rest API - lightpush": asyncTest "Push message request": # Given let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) restLightPushTest.consumerNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to topic" restLightPushTest.serviceNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to topic" require: @@ -158,9 +161,13 @@ suite "Waku v2 Rest API - lightpush": asyncTest "Push message bad-request": # Given let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) restLightPushTest.serviceNode.subscribe( - 
(kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to topic" require: @@ -219,14 +226,18 @@ suite "Waku v2 Rest API - lightpush": let budgetCap = 3 let tokenPeriod = 500.millis let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod)) + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) restLightPushTest.consumerNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to topic" restLightPushTest.serviceNode.subscribe( - (kind: PubsubSub, topic: DefaultPubsubTopic) + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler ).isOkOr: assert false, "Failed to subscribe to topic" require: diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index 719e66b8a..147f6e68f 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -3,7 +3,6 @@ import std/[sequtils, strformat, tempfiles], stew/byteutils, - stew/shims/net, testutils/unittests, presto, presto/client as presto_client, @@ -96,9 +95,18 @@ suite "Waku v2 Rest API - Relay": shard3 = RelayShard(clusterId: DefaultClusterId, shardId: 3) shard4 = RelayShard(clusterId: DefaultClusterId, shardId: 4) - (await node.mountRelay(@[shard0, shard1, shard2, shard3, shard4])).isOkOr: + (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + for shard in @[$shard0, $shard1, $shard2, $shard3, $shard4]: + node.subscribe((kind: PubsubSub, topic: shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() @@ -249,8 +257,14 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic" + require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -260,7 +274,7 @@ suite "Waku v2 Rest API - Relay": RelayWakuMessage( payload: base64.encode("TEST-PAYLOAD"), contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)), + timestamp: some(now()), ), ) @@ -282,7 +296,7 @@ suite "Waku v2 Rest API - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" - require node.mountSharding(1, 8).isOk + require node.mountAutoSharding(1, 8).isOk var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -320,7 +334,7 @@ suite "Waku v2 Rest API - Relay": check: # Node should be subscribed to all shards node.wakuRelay.subscribedTopics == - @["/waku/2/rs/1/7", "/waku/2/rs/1/2", "/waku/2/rs/1/5"] + @["/waku/2/rs/1/5", "/waku/2/rs/1/7", "/waku/2/rs/1/2"] await restServer.stop() await restServer.closeWait() @@ -332,6 +346,7 @@ suite "Waku v2 Rest API - Relay": await node.start() (await 
node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -390,6 +405,7 @@ suite "Waku v2 Rest API - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") @@ -455,6 +471,8 @@ suite "Waku v2 Rest API - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + let wakuRlnConfig = WakuRlnConfig( dynamic: false, credIndex: some(1.uint), @@ -478,7 +496,12 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: ContentSub, topic: DefaultContentTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: ContentSub, topic: DefaultContentTopic), simpleHandler).isOkOr: assert false, "Failed to subscribe to content topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -488,7 +511,7 @@ suite "Waku v2 Rest API - Relay": RelayWakuMessage( payload: base64.encode("TEST-PAYLOAD"), contentTopic: some(DefaultContentTopic), - timestamp: some(int64(2022)), + timestamp: some(now()), ) ) @@ -509,6 +532,8 @@ suite "Waku v2 Rest API - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + let wakuRlnConfig = WakuRlnConfig( dynamic: false, credIndex: some(1.uint), @@ -584,7 +609,12 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 @@ -617,6 +647,8 @@ suite "Waku v2 Rest API - Relay": await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + let wakuRlnConfig = WakuRlnConfig( dynamic: false, credIndex: some(1.uint), @@ -641,7 +673,12 @@ suite "Waku v2 Rest API - Relay": let client = newRestHttpClient(initTAddress(restAddress, restPort)) - node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error require: toSeq(node.wakuRelay.subscribedTopics).len == 1 diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index d0631bfbf..f08ed0a17 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -2,7 +2,6 @@ import std/[options, sugar], - stew/shims/net as stewNet, chronicles, chronos/timer, testutils/unittests, diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim index 
cd501e52d..ee5911abf 100644
--- a/tools/rln_keystore_generator/rln_keystore_generator.nim
+++ b/tools/rln_keystore_generator/rln_keystore_generator.nim
@@ -20,7 +20,7 @@ type RlnKeystoreGeneratorConf* = object
   execute*: bool
   ethContractAddress*: string
   ethClientUrls*: seq[string]
-  chainId*: uint
+  chainId*: UInt256
   credPath*: string
   credPassword*: string
   userMessageLimit*: uint64
diff --git a/vendor/nim-chronicles b/vendor/nim-chronicles
index 81a4a7a36..a8fb38a10 160000
--- a/vendor/nim-chronicles
+++ b/vendor/nim-chronicles
@@ -1 +1 @@
-Subproject commit 81a4a7a360c78be9c80c8f735c76b6d4a1517304
+Subproject commit a8fb38a10bcb548df78e9a70bd77b26bb50abd12
diff --git a/vendor/nim-chronos b/vendor/nim-chronos
index c04576d82..0646c444f 160000
--- a/vendor/nim-chronos
+++ b/vendor/nim-chronos
@@ -1 +1 @@
-Subproject commit c04576d829b8a0a1b12baaa8bc92037501b3a4a0
+Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655
diff --git a/vendor/nim-eth b/vendor/nim-eth
index c6c9dc7ae..a1f7d63ab 160000
--- a/vendor/nim-eth
+++ b/vendor/nim-eth
@@ -1 +1 @@
-Subproject commit c6c9dc7ae01656eba8126b913e84bdfb95c8c323
+Subproject commit a1f7d63ababa6ce90798e16a110fc4e43ac93f03
diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams
index 2b08c774a..c51315d0a 160000
--- a/vendor/nim-faststreams
+++ b/vendor/nim-faststreams
@@ -1 +1 @@
-Subproject commit 2b08c774afaafd600cf4c6f994cf78b8aa090c0c
+Subproject commit c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc
index c0ac84873..cbe8edf69 160000
--- a/vendor/nim-json-rpc
+++ b/vendor/nim-json-rpc
@@ -1 +1 @@
-Subproject commit c0ac848733e42e672081f429fb146451894f7711
+Subproject commit cbe8edf69d743a787b76b1cd25bfc4eae89927f7
diff --git a/vendor/nim-libbacktrace b/vendor/nim-libbacktrace
index dbade9ba2..822849874 160000
--- a/vendor/nim-libbacktrace
+++ b/vendor/nim-libbacktrace
@@ -1 +1 @@
-Subproject commit dbade9ba250da7db519c5cdfb225d03ca1255efc
+Subproject commit 822849874926ba3849a86cb3eafdf017bd11bd2d
diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p
index 78a434405..cd60b254a 160000
--- a/vendor/nim-libp2p
+++ b/vendor/nim-libp2p
@@ -1 +1 @@
-Subproject commit 78a434405435b69a24e8b263d48d622d57c4db5b
+Subproject commit cd60b254a0700b0daac7a6cb2c0c48860b57c539
diff --git a/vendor/nim-metrics b/vendor/nim-metrics
index 25ffd054f..ecf64c607 160000
--- a/vendor/nim-metrics
+++ b/vendor/nim-metrics
@@ -1 +1 @@
-Subproject commit 25ffd054fd774f8cf7935e75d6cad542306d7802
+Subproject commit ecf64c6078d1276d3b7d9b3d931fbdb70004db11
diff --git a/vendor/nim-nat-traversal b/vendor/nim-nat-traversal
index 213ac13df..dfbf8c9ad 160000
--- a/vendor/nim-nat-traversal
+++ b/vendor/nim-nat-traversal
@@ -1 +1 @@
-Subproject commit 213ac13dfe5c4830474912c48181b86b73f1ec1f
+Subproject commit dfbf8c9ad3655f238b350f690bbfce5ec34d25fb
diff --git a/vendor/nim-regex b/vendor/nim-regex
index 0673df07c..4593305ed 160000
--- a/vendor/nim-regex
+++ b/vendor/nim-regex
@@ -1 +1 @@
-Subproject commit 0673df07cb266e15942c3b5f5b8a4732f049cd73
+Subproject commit 4593305ed1e49731fc75af1dc572dd2559aad19c
diff --git a/vendor/nim-secp256k1 b/vendor/nim-secp256k1
index 62e16b4df..f808ed5e7 160000
--- a/vendor/nim-secp256k1
+++ b/vendor/nim-secp256k1
@@ -1 +1 @@
-Subproject commit 62e16b4dff513f1eea7148a8cbba8a8c547b9546
+Subproject commit f808ed5e7a7bfc42204ec7830f14b7a42b63c284
diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi
index cc4fefd53..d08e96487 160000
--- a/vendor/nim-sqlite3-abi
+++ b/vendor/nim-sqlite3-abi
@@ -1 +1 @@
-Subproject commit cc4fefd538aa43814c5864c540fb75b567c2dcc3
+Subproject commit d08e964872271e83fb1b6de67ad57c2d0fcdfe63
diff --git a/vendor/nim-stew b/vendor/nim-stew
index 687d1b4ab..58abb4891 160000
--- a/vendor/nim-stew
+++ b/vendor/nim-stew
@@ -1 +1 @@
-Subproject commit 687d1b4ab1a91e6cc9c92e4fd4d98bec7874c259
+Subproject commit 58abb4891f97c6cdc07335e868414e0c7b736c68
diff --git a/vendor/nim-taskpools b/vendor/nim-taskpools
index 7b74a716a..9e8ccc754 160000
--- a/vendor/nim-taskpools
+++ b/vendor/nim-taskpools
@@ -1 +1 @@
-Subproject commit 7b74a716a40249720fd7da428113147942b9642d
+Subproject commit 9e8ccc754631ac55ac2fd495e167e74e86293edb
diff --git a/vendor/nim-testutils b/vendor/nim-testutils
index 14a56ae5a..94d68e796 160000
--- a/vendor/nim-testutils
+++ b/vendor/nim-testutils
@@ -1 +1 @@
-Subproject commit 14a56ae5aada81bed43e29d2368fc8ab8a449bf5
+Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857
diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2
index 88a613ffa..8b51e99b4 160000
--- a/vendor/nim-unittest2
+++ b/vendor/nim-unittest2
@@ -1 +1 @@
-Subproject commit 88a613ffa4dbe452971beb937ea2db736dc9a9f4
+Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024
diff --git a/vendor/nim-web3 b/vendor/nim-web3
index 94aac8a77..3ef986c9d 160000
--- a/vendor/nim-web3
+++ b/vendor/nim-web3
@@ -1 +1 @@
-Subproject commit 94aac8a77cd265fe779ce8ed25a028340b925fd1
+Subproject commit 3ef986c9d93604775595f116a35c6ac0bf5257fc
diff --git a/vendor/nim-zlib b/vendor/nim-zlib
index 3f7998095..daa8723fd 160000
--- a/vendor/nim-zlib
+++ b/vendor/nim-zlib
@@ -1 +1 @@
-Subproject commit 3f7998095264d262a8d99e2be89045e6d9301537
+Subproject commit daa8723fd32299d4ca621c837430c29a5a11e19a
diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system
index 8fafcd0ba..0be0663e1 160000
--- a/vendor/nimbus-build-system
+++ b/vendor/nimbus-build-system
@@ -1 +1 @@
-Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e
+Subproject commit 0be0663e1af76e869837226a4ef3e586fcc737d3
diff --git a/vendor/nimcrypto b/vendor/nimcrypto
index dc07e3058..19c41d6be 160000
--- a/vendor/nimcrypto
+++ b/vendor/nimcrypto
@@ -1 +1 @@
-Subproject commit dc07e3058c6904eef965394493b6ea99aa2adefc
+Subproject commit 19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
diff --git a/vendor/nph b/vendor/nph
index 0d8000e74..c6e03162d 160000
--- a/vendor/nph
+++ b/vendor/nph
@@ -1 +1 @@
-Subproject commit 0d8000e741fa11ed48fdd116f24b4251b92aa9b5
+Subproject commit c6e03162dc2820d3088660f644818d7040e95791
diff --git a/vendor/waku-rlnv2-contract b/vendor/waku-rlnv2-contract
index a576a8949..b7e9a9b1b 160000
--- a/vendor/waku-rlnv2-contract
+++ b/vendor/waku-rlnv2-contract
@@ -1 +1 @@
-Subproject commit a576a8949ca20e310f2fbb4ec0bd05a57ac3045f
+Subproject commit b7e9a9b1bc69256a2a3076c1f099b50ce84e7eff
diff --git a/waku.nimble b/waku.nimble
index 9cf73295f..3d2d7de88 100644
--- a/waku.nimble
+++ b/waku.nimble
@@ -1,3 +1,4 @@
+import os
 mode = ScriptMode.Verbose
 
 ### Package
@@ -8,7 +9,7 @@ license = "MIT or Apache License 2.0"
 #bin = @["build/waku"]
 
 ### Dependencies
-requires "nim >= 2.0.8",
+requires "nim >= 2.2.4",
   "chronicles",
   "confutils",
   "chronos",
@@ -66,12 +67,18 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
     extra_params &= " " & paramStr(i)
   if `type` == "static":
     exec "nim c" & " --out:build/" & name &
-      ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " &
+      ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
       extra_params & " " & srcDir & name & ".nim"
   else:
-    exec "nim c" & " --out:build/" & name &
-      ".so --threads:on --app:lib --opt:size --noMain --mm:refc --header --undef:metrics --nimMainPrefix:libwaku --skipParentCfg:on " &
-      extra_params & " " & srcDir & name & ".nim"
+    let lib_name = (when defined(windows): toDll(name) else: name & ".so")
+    when defined(windows):
+      exec "nim c" & " --out:build/" & lib_name &
+        " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
+        extra_params & " " & srcDir & name & ".nim"
+    else:
+      exec "nim c" & " --out:build/" & lib_name &
+        " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
+        extra_params & " " & srcDir & name & ".nim"
 
 proc buildMobileAndroid(srcDir = ".", params = "") =
   let cpu = getEnv("CPU")
@@ -154,39 +161,29 @@ task buildone, "Build custom target":
   let filepath = paramStr(paramCount())
   discard buildModule filepath
 
-task testone, "Test custom target":
+task buildTest, "Test custom target":
   let filepath = paramStr(paramCount())
-  if buildModule(filepath):
-    exec "build/" & filepath & ".bin"
+  discard buildModule(filepath)
+
+task execTest, "Run test":
+  let filepath = paramStr(paramCount() - 1)
+  exec "build/" & filepath & ".bin" & " test \"" & paramStr(paramCount()) & "\""
 
 ### C Bindings
+let chroniclesParams =
+  "-d:chronicles_line_numbers " & "-d:chronicles_runtime_filtering=on " &
+  """-d:chronicles_sinks="textlines,json" """ &
+  "-d:chronicles_default_output_device=Dynamic " &
+  """-d:chronicles_disabled_topics="eth,dnsdisc.client" """ & "--warning:Deprecated:off " &
+  "--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE"
+
 task libwakuStatic, "Build the cbindings waku node library":
   let name = "libwaku"
-  buildLibrary name,
-    "library/",
-    """-d:chronicles_line_numbers \
-    -d:chronicles_runtime_filtering=on \
-    -d:chronicles_sinks="textlines,json" \
-    -d:chronicles_default_output_device=Dynamic \
-    -d:chronicles_disabled_topics="eth,dnsdisc.client" \
-    --warning:Deprecated:off \
-    --warning:UnusedImport:on \
-    -d:chronicles_log_level=TRACE """,
-    "static"
+  buildLibrary name, "library/", chroniclesParams, "static"
 
 task libwakuDynamic, "Build the cbindings waku node library":
   let name = "libwaku"
-  buildLibrary name,
-    "library/",
-    """-d:chronicles_line_numbers \
-    -d:chronicles_runtime_filtering=on \
-    -d:chronicles_sinks="textlines,json" \
-    -d:chronicles_default_output_device=Dynamic \
-    -d:chronicles_disabled_topics="eth,dnsdisc.client" \
-    --warning:Deprecated:off \
-    --warning:UnusedImport:on \
-    -d:chronicles_log_level=TRACE """,
-    "dynamic"
+  buildLibrary name, "library/", chroniclesParams, "dynamic"
 
 ### Mobile Android
 task libWakuAndroid, "Build the mobile bindings for Android":
reachability status", networkReachability = networkReachability, confidence = confidence.get() diff --git a/waku/discovery/waku_discv5.nim b/waku/discovery/waku_discv5.nim index 221acef42..5bdb91a2e 100644 --- a/waku/discovery/waku_discv5.nim +++ b/waku/discovery/waku_discv5.nim @@ -26,7 +26,6 @@ logScope: type Discv5Conf* {.requiresInit.} = object # TODO: This should probably be an option on the builder # But translated to everything else "false" on the config - discv5Only*: bool bootstrapNodes*: seq[string] udpPort*: Port tableIpLimit*: uint diff --git a/waku/factory/builder.nim b/waku/factory/builder.nim index 2fc91c8c7..df84b0ba5 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -43,8 +43,8 @@ type switchSendSignedPeerRecord: Option[bool] circuitRelay: Relay - #Rate limit configs for non-relay req-resp protocols - rateLimitSettings: Option[seq[string]] + # Rate limit configs for non-relay req-resp protocols + rateLimitSettings: Option[ProtocolRateLimitSettings] # Eligibility enabled eligibilityEnabled: bool @@ -85,6 +85,7 @@ proc withNetworkConfigurationDetails*( wssEnabled: bool = false, wakuFlags = none(CapabilitiesBitfield), dns4DomainName = none(string), + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], ): WakuNodeBuilderResult {. deprecated: "use 'builder.withNetworkConfiguration()' instead" .} = @@ -100,6 +101,7 @@ proc withNetworkConfigurationDetails*( wssEnabled = wssEnabled, wakuFlags = wakuFlags, dns4DomainName = dns4DomainName, + dnsNameServers = dnsNameServers, ) builder.withNetworkConfiguration(netConfig) ok() @@ -135,7 +137,7 @@ proc withPeerManagerConfig*( proc withColocationLimit*(builder: var WakuNodeBuilder, colocationLimit: int) = builder.colocationLimit = colocationLimit -proc withRateLimit*(builder: var WakuNodeBuilder, limits: seq[string]) = +proc withRateLimit*(builder: var WakuNodeBuilder, limits: ProtocolRateLimitSettings) = builder.rateLimitSettings = some(limits) proc withCircuitRelay*(builder: var WakuNodeBuilder, circuitRelay: Relay) = @@ -176,6 +178,10 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = if builder.netConfig.isNone(): return err("network configuration is required") + let netConfig = builder.netConfig.get() + if netConfig.dnsNameServers.len == 0: + return err("DNS name servers are required for WakuNode") + if builder.record.isNone(): return err("node record is required") @@ -206,8 +212,6 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = except CatchableError: return err("failed to create switch: " & getCurrentExceptionMsg()) - let netConfig = builder.netConfig.get() - let peerManager = PeerManager.new( switch = switch, storage = builder.peerStorage.get(nil), @@ -215,7 +219,6 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = maxServicePeers = some(builder.maxServicePeers), colocationLimit = builder.colocationLimit, shardedPeerManagement = builder.shardAware, - dnsNameServers = netConfig.dnsNameServers, eligibilityEnabled = builder.eligibilityEnabled, reputationEnabled = builder.reputationEnabled ) @@ -228,11 +231,9 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = switch = switch, peerManager = peerManager, rng = rng, + rateLimitSettings = builder.rateLimitSettings.get(DefaultProtocolRateLimit), ) except Exception: return err("failed to build WakuNode instance: " & getCurrentExceptionMsg()) - if builder.rateLimitSettings.isSome(): - ?node.setRateLimits(builder.rateLimitSettings.get()) - ok(node) diff --git 
diff --git a/waku/factory/conf_builder/conf_builder.nim b/waku/factory/conf_builder/conf_builder.nim
index 9b7f44ada..14b762756 100644
--- a/waku/factory/conf_builder/conf_builder.nim
+++ b/waku/factory/conf_builder/conf_builder.nim
@@ -8,10 +8,11 @@ import
   ./discv5_conf_builder,
   ./web_socket_conf_builder,
   ./metrics_server_conf_builder,
+  ./rate_limit_conf_builder,
   ./rln_relay_conf_builder
 
 export
   waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
   store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
   discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
-  rln_relay_conf_builder
+  rate_limit_conf_builder, rln_relay_conf_builder
diff --git a/waku/factory/conf_builder/discv5_conf_builder.nim b/waku/factory/conf_builder/discv5_conf_builder.nim
index 950b2a4f6..e2729021e 100644
--- a/waku/factory/conf_builder/discv5_conf_builder.nim
+++ b/waku/factory/conf_builder/discv5_conf_builder.nim
@@ -13,7 +13,6 @@ type Discv5ConfBuilder* = object
   bootstrapNodes*: seq[string]
   bitsPerHop*: Option[int]
   bucketIpLimit*: Option[uint]
-  discv5Only*: Option[bool]
   enrAutoUpdate*: Option[bool]
   tableIpLimit*: Option[uint]
   udpPort*: Option[Port]
@@ -30,9 +29,6 @@ proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) =
 proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) =
   b.bucketIpLimit = some(bucketIpLimit)
 
-proc withDiscv5Only*(b: var Discv5ConfBuilder, discv5Only: bool) =
-  b.discv5Only = some(discv5Only)
-
 proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) =
   b.enrAutoUpdate = some(enrAutoUpdate)
 
@@ -42,6 +38,9 @@ proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) =
 proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) =
   b.udpPort = some(udpPort)
 
+proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: uint) =
+  b.udpPort = some(Port(udpPort.uint16))
+
 proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) =
   # TODO: validate ENRs?
   b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes)
@@ -56,7 +55,6 @@ proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] =
       bootstrapNodes: b.bootstrapNodes,
       bitsPerHop: b.bitsPerHop.get(1),
       bucketIpLimit: b.bucketIpLimit.get(2),
-      discv5Only: b.discv5Only.get(false),
       enrAutoUpdate: b.enrAutoUpdate.get(true),
       tableIpLimit: b.tableIpLimit.get(10),
       udpPort: b.udpPort.get(9000.Port),
diff --git a/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
index 8ac33a18f..1c577bbf8 100644
--- a/waku/factory/conf_builder/dns_discovery_conf_builder.nim
+++ b/waku/factory/conf_builder/dns_discovery_conf_builder.nim
@@ -1,4 +1,4 @@
-import chronicles, std/[net, options, sequtils], results
+import chronicles, std/[net, options, strutils], results
 import ../waku_conf
 
 logScope:
@@ -8,30 +8,26 @@ logScope:
 ## DNS Discovery Config Builder ##
 ##################################
 type DnsDiscoveryConfBuilder* = object
-  enabled*: Option[bool]
   enrTreeUrl*: Option[string]
   nameServers*: seq[IpAddress]
 
 proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder =
   DnsDiscoveryConfBuilder()
 
-proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
-  b.enabled = some(enabled)
-
 proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
   b.enrTreeUrl = some(enrTreeUrl)
 
 proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) =
-  b.nameServers = concat(b.nameServers, nameServers)
+  b.nameServers = nameServers
 
 proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
-  if not b.enabled.get(false):
+  if b.enrTreeUrl.isNone():
     return ok(none(DnsDiscoveryConf))
+  if isEmptyOrWhiteSpace(b.enrTreeUrl.get()):
+    return err("dnsDiscovery.enrTreeUrl cannot be an empty string")
 
   if b.nameServers.len == 0:
     return err("dnsDiscovery.nameServers is not specified")
-  if b.enrTreeUrl.isNone():
-    return err("dnsDiscovery.enrTreeUrl is not specified")
 
   return ok(
     some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get()))
diff --git a/waku/factory/conf_builder/rate_limit_conf_builder.nim b/waku/factory/conf_builder/rate_limit_conf_builder.nim
new file mode 100644
index 000000000..0d466a132
--- /dev/null
+++ b/waku/factory/conf_builder/rate_limit_conf_builder.nim
@@ -0,0 +1,29 @@
+import chronicles, std/[net, options], results
+import waku/common/rate_limit/setting
+
+logScope:
+  topics = "waku conf builder rate limit"
+
+type RateLimitConfBuilder* = object
+  strValue: Option[seq[string]]
+  objValue: Option[ProtocolRateLimitSettings]
+
+proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder =
+  RateLimitConfBuilder()
+
+proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) =
+  b.strValue = some(rateLimits)
+
+proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] =
+  if b.strValue.isSome() and b.objValue.isSome():
+    return err("Rate limits conf must only be set once on the builder")
+
+  if b.objValue.isSome():
+    return ok(b.objValue.get())
+
+  if b.strValue.isSome():
+    let rateLimits = ProtocolRateLimitSettings.parse(b.strValue.get()).valueOr:
+      return err("Invalid rate limits settings:" & $error)
+    return ok(rateLimits)
+
+  return ok(DefaultProtocolRateLimit)
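Note: a minimal usage sketch (not part of the diff) of the RateLimitConfBuilder added above. The module path and the textual rate-limit format are assumptions; only the builder API (init, withRateLimits, build, DefaultProtocolRateLimit fallback) is taken from this change.

import results
import waku/common/rate_limit/setting                      # path taken from the new file's import
import waku/factory/conf_builder/rate_limit_conf_builder   # assumed module path

var b = RateLimitConfBuilder.init()
b.withRateLimits(@["lightpush:5/1s"])   # hypothetical "<protocol>:<volume>/<period>" string
let settings: ProtocolRateLimitSettings = b.build().valueOr:
  quit("invalid rate limit: " & error)  # build() falls back to DefaultProtocolRateLimit when unset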
diff --git a/waku/factory/conf_builder/rln_relay_conf_builder.nim b/waku/factory/conf_builder/rln_relay_conf_builder.nim
index ea87eb278..455f0a57f 100644
--- a/waku/factory/conf_builder/rln_relay_conf_builder.nim
+++ b/waku/factory/conf_builder/rln_relay_conf_builder.nim
@@ -1,4 +1,4 @@
-import chronicles, std/options, results
+import chronicles, std/options, results, stint, stew/endians2
 import ../waku_conf
 
 logScope:
@@ -9,7 +9,7 @@
 ##############################
 type RlnRelayConfBuilder* = object
   enabled*: Option[bool]
-  chainId*: Option[uint]
+  chainId*: Option[UInt256]
   ethClientUrls*: Option[seq[string]]
   ethContractAddress*: Option[string]
   credIndex*: Option[uint]
@@ -26,8 +26,11 @@ proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
 proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
   b.enabled = some(enabled)
 
-proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) =
-  b.chainId = some(chainId)
+proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint | UInt256) =
+  when chainId is uint:
+    b.chainId = some(UInt256.fromBytesBE(chainId.toBytesBE()))
+  else:
+    b.chainId = some(chainId)
 
 proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
  b.credIndex = some(credIndex)
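Note: a short sketch (not part of the diff) of the widened withChainId overload above, which now accepts either a plain uint or a UInt256; module path is assumed.

import stint
import waku/factory/conf_builder/rln_relay_conf_builder  # assumed module path

var rln = RlnRelayConfBuilder.init()
rln.withChainId(59141'u)     # uint is converted to UInt256 via toBytesBE/fromBytesBE
rln.withChainId(59141.u256)  # UInt256 is stored as-is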
diff --git a/waku/factory/conf_builder/store_service_conf_builder.nim b/waku/factory/conf_builder/store_service_conf_builder.nim
index d12bc8150..d5d48c34d 100644
--- a/waku/factory/conf_builder/store_service_conf_builder.nim
+++ b/waku/factory/conf_builder/store_service_conf_builder.nim
@@ -64,7 +64,7 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string
       dbMigration: b.dbMigration.get(true),
       dbURl: b.dbUrl.get(),
       dbVacuum: b.dbVacuum.get(false),
-      supportV2: b.supportV2.get(true),
+      supportV2: b.supportV2.get(false),
       maxNumDbConnections: b.maxNumDbConnections.get(50),
       retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
       resume: b.resume.get(false),
diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim
index d89e026f4..a26e80329 100644
--- a/waku/factory/conf_builder/waku_conf_builder.nim
+++ b/waku/factory/conf_builder/waku_conf_builder.nim
@@ -2,6 +2,7 @@ import
   libp2p/crypto/crypto,
   libp2p/multiaddress,
   std/[net, options, sequtils, strutils],
+  stint,
   chronicles,
   chronos,
   results
@@ -23,6 +24,7 @@ import
   ./web_socket_conf_builder,
   ./metrics_server_conf_builder,
   ./rln_relay_conf_builder,
+  ./rate_limit_conf_builder,
   ./eligibility_conf_builder,
   ./reputation_conf_builder
@@ -60,8 +62,9 @@ type WakuConfBuilder* = object
   nodeKey: Option[crypto.PrivateKey]
 
   clusterId: Option[uint16]
-  numShardsInNetwork: Option[uint32]
-  shards: Option[seq[uint16]]
+  shardingConf: Option[ShardingConfKind]
+  numShardsInCluster: Option[uint16]
+  subscribeShards: Option[seq[uint16]]
   protectedShards: Option[seq[ProtectedShard]]
   contentTopics: Option[seq[string]]
@@ -76,6 +79,7 @@ type WakuConfBuilder* = object
   webSocketConf*: WebSocketConfBuilder
   eligibilityConf*: EligibilityConfBuilder
   reputationConf*: ReputationConfBuilder
+  rateLimitConf*: RateLimitConfBuilder
   # End conf builders
 
   relay: Option[bool]
   lightPush: Option[bool]
@@ -85,9 +89,8 @@ type WakuConfBuilder* = object
   # TODO: move within a relayConf
   rendezvous: Option[bool]
-  discv5Only: Option[bool]
 
-  clusterConf: Option[ClusterConf]
+  networkConf: Option[NetworkConf]
 
   staticNodes: seq[string]
@@ -110,7 +113,6 @@ type WakuConfBuilder* = object
   extMultiAddrs: seq[string]
   extMultiAddrsOnly: Option[bool]
 
-  dnsAddrs: Option[bool]
   dnsAddrsNameServers: seq[IpAddress]
 
   peerPersistence: Option[bool]
@@ -120,13 +122,10 @@ type WakuConfBuilder* = object
   agentString: Option[string]
 
-  rateLimits: Option[seq[string]]
-
   maxRelayPeers: Option[int]
   relayShardedPeerManagement: Option[bool]
   relayServiceRatio: Option[string]
   circuitRelayClient: Option[bool]
-  keepAlive: Option[bool]
   p2pReliability: Option[bool]
 
 proc init*(T: type WakuConfBuilder): WakuConfBuilder =
@@ -141,10 +140,11 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder =
     webSocketConf: WebSocketConfBuilder.init(),
     eligibilityConf: EligibilityConfBuilder.init(),
     reputationConf: ReputationConfBuilder.init(),
+    rateLimitConf: RateLimitConfBuilder.init(),
   )
 
-proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) =
-  b.clusterConf = some(clusterConf)
+proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
+  b.networkConf = some(networkConf)
 
 proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
   b.nodeKey = some(nodeKey)
@@ -152,11 +152,14 @@ proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
 proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
   b.clusterId = some(clusterId)
 
-proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) =
-  b.numShardsInNetwork = some(numShardsInNetwork)
+proc withShardingConf*(b: var WakuConfBuilder, shardingConf: ShardingConfKind) =
+  b.shardingConf = some(shardingConf)
 
-proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
-  b.shards = some(shards)
+proc withNumShardsInCluster*(b: var WakuConfBuilder, numShardsInCluster: uint16) =
+  b.numShardsInCluster = some(numShardsInCluster)
+
+proc withSubscribeShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
+  b.subscribeShards = some(shards)
 
 proc withProtectedShards*(
     b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
@@ -198,9 +201,6 @@ proc withRemotePeerExchangeNode*(
 ) =
   b.remotePeerExchangeNode = some(remotePeerExchangeNode)
 
-proc withDnsAddrs*(b: var WakuConfBuilder, dnsAddrs: bool) =
-  b.dnsAddrs = some(dnsAddrs)
-
 proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) =
   b.peerPersistence = some(peerPersistence)
@@ -213,7 +213,7 @@ proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) =
 proc withDnsAddrsNameServers*(
     b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress]
 ) =
-  b.dnsAddrsNameServers = concat(b.dnsAddrsNameServers, dnsAddrsNameServers)
+  b.dnsAddrsNameServers.insert(dnsAddrsNameServers)
 
 proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) =
   b.logLevel = some(logLevel)
@@ -248,9 +248,6 @@ proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
 proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
   b.colocationLimit = some(colocationLimit)
 
-proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) =
-  b.rateLimits = some(rateLimits)
-
 proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
   b.maxRelayPeers = some(maxRelayPeers)
@@ -265,9 +262,6 @@ proc withRelayShardedPeerManagement*(
 ) =
   b.relayShardedPeerManagement = some(relayShardedPeerManagement)
 
-proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) =
-  b.keepAlive = some(keepAlive)
-
 proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
   b.p2pReliability = some(p2pReliability)
@@ -283,6 +277,8 @@ proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
 proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
   builder.staticNodes = concat(builder.staticNodes, staticNodes)
 
+## Building
+
 proc nodeKey(
     builder: WakuConfBuilder, rng: ref HmacDrbgContext
 ): Result[crypto.PrivateKey, string] =
@@ -295,77 +291,105 @@ proc nodeKey(
       return err("Failed to generate key: " & $error)
   return ok(nodeKey)
 
-proc applyClusterConf(builder: var WakuConfBuilder) =
-  # Apply cluster conf, overrides most values passed individually
-  # If you want to tweak values, don't use clusterConf
-  if builder.clusterConf.isNone:
+proc buildShardingConf(
+    bShardingConfKind: Option[ShardingConfKind],
+    bNumShardsInCluster: Option[uint16],
+    bSubscribeShards: Option[seq[uint16]],
+): (ShardingConf, seq[uint16]) =
+  echo "bSubscribeShards: ", bSubscribeShards
+  case bShardingConfKind.get(AutoSharding)
+  of StaticSharding:
+    (ShardingConf(kind: StaticSharding), bSubscribeShards.get(@[]))
+  of AutoSharding:
+    let numShardsInCluster = bNumShardsInCluster.get(1)
+    let shardingConf =
+      ShardingConf(kind: AutoSharding, numShardsInCluster: numShardsInCluster)
+    let upperShard = uint16(numShardsInCluster - 1)
+    (shardingConf, bSubscribeShards.get(toSeq(0.uint16 .. upperShard)))
+
+proc applyNetworkConf(builder: var WakuConfBuilder) =
+  # Apply network conf, overrides most values passed individually
+  # If you want to tweak values, don't use networkConf
+  # TODO: networkconf should be one field of the conf builder so that this function becomes unnecessary
+  if builder.networkConf.isNone():
     return
-  let clusterConf = builder.clusterConf.get()
+  let networkConf = builder.networkConf.get()
 
   if builder.clusterId.isSome():
-    warn "Cluster id was provided alongside a cluster conf",
-      used = clusterConf.clusterId, discarded = builder.clusterId.get()
-  builder.clusterId = some(clusterConf.clusterId)
+    warn "Cluster id was provided alongside a network conf",
+      used = networkConf.clusterId, discarded = builder.clusterId.get()
+  builder.clusterId = some(networkConf.clusterId)
 
   # Apply relay parameters
-  if builder.relay.get(false) and clusterConf.rlnRelay:
+  if builder.relay.get(false) and networkConf.rlnRelay:
     if builder.rlnRelayConf.enabled.isSome():
-      warn "RLN Relay was provided alongside a cluster conf",
-        used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
+      warn "RLN Relay was provided alongside a network conf",
+        used = networkConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
     builder.rlnRelayConf.withEnabled(true)
 
     if builder.rlnRelayConf.ethContractAddress.get("") != "":
-      warn "RLN Relay ETH Contract Address was provided alongside a cluster conf",
-        used = clusterConf.rlnRelayEthContractAddress.string,
+      warn "RLN Relay ETH Contract Address was provided alongside a network conf",
+        used = networkConf.rlnRelayEthContractAddress.string,
         discarded = builder.rlnRelayConf.ethContractAddress.get().string
-    builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress)
+    builder.rlnRelayConf.withEthContractAddress(networkConf.rlnRelayEthContractAddress)
 
     if builder.rlnRelayConf.chainId.isSome():
-      warn "RLN Relay Chain Id was provided alongside a cluster conf",
-        used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
-    builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId)
+      warn "RLN Relay Chain Id was provided alongside a network conf",
+        used = networkConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
+    builder.rlnRelayConf.withChainId(networkConf.rlnRelayChainId)
 
     if builder.rlnRelayConf.dynamic.isSome():
-      warn "RLN Relay Dynamic was provided alongside a cluster conf",
-        used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
-    builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic)
network conf", + used = networkConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic + builder.rlnRelayConf.withDynamic(networkConf.rlnRelayDynamic) if builder.rlnRelayConf.epochSizeSec.isSome(): - warn "RLN Epoch Size in Seconds was provided alongside a cluster conf", - used = clusterConf.rlnEpochSizeSec, + warn "RLN Epoch Size in Seconds was provided alongside a network conf", + used = networkConf.rlnEpochSizeSec, discarded = builder.rlnRelayConf.epochSizeSec - builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec) + builder.rlnRelayConf.withEpochSizeSec(networkConf.rlnEpochSizeSec) if builder.rlnRelayConf.userMessageLimit.isSome(): - warn "RLN Relay Dynamic was provided alongside a cluster conf", - used = clusterConf.rlnRelayUserMessageLimit, + warn "RLN Relay Dynamic was provided alongside a network conf", + used = networkConf.rlnRelayUserMessageLimit, discarded = builder.rlnRelayConf.userMessageLimit - builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit) + builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit) # End Apply relay parameters case builder.maxMessageSize.kind of mmskNone: discard of mmskStr, mmskInt: - warn "Max Message Size was provided alongside a cluster conf", - used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize - builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize)) + warn "Max Message Size was provided alongside a network conf", + used = networkConf.maxMessageSize, discarded = $builder.maxMessageSize + builder.withMaxMessageSize(parseCorrectMsgSize(networkConf.maxMessageSize)) - if builder.numShardsInNetwork.isSome(): - warn "Num Shards In Network was provided alongside a cluster conf", - used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork - builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork) + if builder.shardingConf.isSome(): + warn "Sharding Conf was provided alongside a network conf", + used = networkConf.shardingConf.kind, discarded = builder.shardingConf - if clusterConf.discv5Discovery: + if builder.numShardsInCluster.isSome(): + warn "Num Shards In Cluster was provided alongside a network conf", + used = networkConf.shardingConf.numShardsInCluster, + discarded = builder.numShardsInCluster + + case networkConf.shardingConf.kind + of StaticSharding: + builder.shardingConf = some(StaticSharding) + of AutoSharding: + builder.shardingConf = some(AutoSharding) + builder.numShardsInCluster = some(networkConf.shardingConf.numShardsInCluster) + + if networkConf.discv5Discovery: if builder.discv5Conf.enabled.isNone: - builder.discv5Conf.withEnabled(clusterConf.discv5Discovery) + builder.discv5Conf.withEnabled(networkConf.discv5Discovery) if builder.discv5Conf.bootstrapNodes.len == 0 and - clusterConf.discv5BootstrapNodes.len > 0: - warn "Discv5 Boostrap nodes were provided alongside a cluster conf", - used = clusterConf.discv5BootstrapNodes, + networkConf.discv5BootstrapNodes.len > 0: + warn "Discv5 Bootstrap nodes were provided alongside a network conf", + used = networkConf.discv5BootstrapNodes, discarded = builder.discv5Conf.bootstrapNodes - builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes) + builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes) proc build*( builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng() @@ -375,7 +399,7 @@ proc build*( ## of libwaku. It aims to be agnostic so it does not apply a ## default when it is opinionated. 
- applyClusterConf(builder) + applyNetworkConf(builder) let relay = if builder.relay.isSome(): @@ -423,26 +447,16 @@ proc build*( warn("Cluster Id was not specified, defaulting to 0") 0.uint16 else: - builder.clusterId.get() - - let numShardsInNetwork = - if builder.numShardsInNetwork.isSome(): - builder.numShardsInNetwork.get() - else: - warn "Number of shards in network not specified, defaulting to zero (improve is wip)" - 0 - - let shards = - if builder.shards.isSome(): - builder.shards.get() - else: - warn "shards not specified, defaulting to all shards in network" - # TODO: conversion should not be needed - let upperShard: uint16 = uint16(numShardsInNetwork - 1) - toSeq(0.uint16 .. upperShard) + builder.clusterId.get().uint16 + let (shardingConf, subscribeShards) = buildShardingConf( + builder.shardingConf, builder.numShardsInCluster, builder.subscribeShards + ) let protectedShards = builder.protectedShards.get(@[]) + info "Sharding configuration: ", + shardingConf = $shardingConf, subscribeShards = $subscribeShards + let maxMessageSizeBytes = case builder.maxMessageSize.kind of mmskInt: @@ -485,6 +499,10 @@ proc build*( let reputationConf = builder.reputationConf.build().valueOr: return err("Reputation Conf building failed: " & $error) + + let rateLimitConf = builder.rateLimitConf.build().valueOr: + return err("Rate limits Conf building failed: " & $error) + # End - Build sub-configs let logLevel = @@ -552,13 +570,6 @@ proc build*( warn "Whether to only announce external multiaddresses is not specified, defaulting to false" false - let dnsAddrs = - if builder.dnsAddrs.isSome(): - builder.dnsAddrs.get() - else: - warn "Whether to resolve DNS multiaddresses was not specified, defaulting to false." - false - let dnsAddrsNameServers = if builder.dnsAddrsNameServers.len != 0: builder.dnsAddrsNameServers @@ -586,7 +597,6 @@ proc build*( # TODO: use `DefaultColocationLimit`. the user of this value should # probably be defining a config object let colocationLimit = builder.colocationLimit.get(5) - let rateLimits = builder.rateLimits.get(newSeq[string](0)) # TODO: is there a strategy for experimental features? 
 
   # TODO: is there a strategy for experimental features? delete vs promote
   let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
@@ -610,12 +620,13 @@ proc build*(
       dnsDiscoveryConf: dnsDiscoveryConf,
       eligibilityConf: eligibilityConf,
       reputationConf: reputationConf,
+      rateLimitConf: rateLimitConf,
       # end confs
       nodeKey: nodeKey,
       clusterId: clusterId,
-      numShardsInNetwork: numShardsInNetwork,
+      shardingConf: shardingConf,
       contentTopics: contentTopics,
-      shards: shards,
+      subscribeShards: subscribeShards,
       protectedShards: protectedShards,
       relay: relay,
       lightPush: lightPush,
@@ -630,7 +641,7 @@ proc build*(
       logLevel: logLevel,
       logFormat: logFormat,
       # TODO: Separate builders
-      networkConf: NetworkConfig(
+      endpointConf: EndpointConf(
         natStrategy: natStrategy,
         p2pTcpPort: p2pTcpPort,
         dns4DomainName: dns4DomainName,
@@ -640,7 +651,6 @@ proc build*(
       ),
       portsShift: portsShift,
       webSocketConf: webSocketConf,
-      dnsAddrs: dnsAddrs,
       dnsAddrsNameServers: dnsAddrsNameServers,
       peerPersistence: peerPersistence,
       peerStoreCapacity: builder.peerStoreCapacity,
@@ -649,9 +659,7 @@ proc build*(
       colocationLimit: colocationLimit,
       maxRelayPeers: builder.maxRelayPeers,
       relayServiceRatio: builder.relayServiceRatio.get("60:40"),
-      rateLimits: rateLimits,
       circuitRelayClient: builder.circuitRelayClient.get(false),
-      keepAlive: builder.keepAlive.get(true),
      staticNodes: builder.staticNodes,
       relayShardedPeerManagement: relayShardedPeerManagement,
       p2pReliability: builder.p2pReliability.get(false),
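Note: with the sharding changes above, callers now choose the sharding mode explicitly on the builder; a hedged sketch of the intended usage, using only the procs shown in this diff (the surrounding imports and the rest of the configuration are omitted):

var b = WakuConfBuilder.init()
b.withClusterId(1)
b.withShardingConf(AutoSharding)
b.withNumShardsInCluster(8)            # subscribeShards then defaults to shards 0..7
# b.withSubscribeShards(@[0'u16, 4])   # optional explicit subset instead of the default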
diff --git a/waku/factory/conf_builder/web_socket_conf_builder.nim b/waku/factory/conf_builder/web_socket_conf_builder.nim
index 25ff6461d..88edc0941 100644
--- a/waku/factory/conf_builder/web_socket_conf_builder.nim
+++ b/waku/factory/conf_builder/web_socket_conf_builder.nim
@@ -1,5 +1,5 @@
 import chronicles, std/[net, options], results
-import ../network_conf
+import waku/factory/waku_conf
 
 logScope:
   topics = "waku conf builder websocket"
diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim
index fd961bb6e..70fd1ebae 100644
--- a/waku/factory/external_config.nim
+++ b/waku/factory/external_config.nim
@@ -4,6 +4,8 @@ import
   chronicles,
   chronos,
   regex,
+  stew/endians2,
+  stint,
   confutils,
   confutils/defs,
   confutils/std/net,
@@ -245,12 +247,6 @@ type WakuNodeConf* = object
   .}: bool
 
   ## DNS addrs config
-  dnsAddrs* {.
-    desc: "Enable resolution of `dnsaddr`, `dns4` or `dns6` multiaddrs",
-    defaultValue: true,
-    name: "dns-addrs"
-  .}: bool
-
   dnsAddrsNameServers* {.
     desc:
       "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.",
@@ -320,34 +316,16 @@ type WakuNodeConf* = object
     name: "staticnode"
   .}: seq[string]
 
-  keepAlive* {.
-    desc: "Enable keep-alive for idle connections: true|false",
-    defaultValue: false,
-    name: "keep-alive"
-  .}: bool
-
-  # TODO: This is trying to do too much, this should only be used for autosharding, which itself should be configurable
-  # If numShardsInNetwork is not set, we use the number of shards configured as numShardsInNetwork
   numShardsInNetwork* {.
-    desc: "Number of shards in the network",
-    defaultValue: 0,
+    desc:
+      "Enables autosharding and set number of shards in the cluster, set to `0` to use static sharding",
+    defaultValue: 1,
     name: "num-shards-in-network"
-  .}: uint32
+  .}: uint16
 
   shards* {.
     desc:
-      "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
-    defaultValue:
-      @[
-        uint16(0),
-        uint16(1),
-        uint16(2),
-        uint16(3),
-        uint16(4),
-        uint16(5),
-        uint16(6),
-        uint16(7),
-      ],
+      "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated. Subscribes to all shards by default in auto-sharding, no shard for static sharding",
     name: "shard"
   .}: seq[uint16]
@@ -363,7 +341,7 @@ type WakuNodeConf* = object
   legacyStore* {.
     desc: "Enable/disable support of Waku Store v2 as a service",
-    defaultValue: true,
+    defaultValue: false,
     name: "legacy-store"
   .}: bool
@@ -544,7 +522,7 @@ type WakuNodeConf* = object
   restRelayCacheCapacity* {.
     desc: "Capacity of the Relay REST API message cache.",
-    defaultValue: 30,
+    defaultValue: 50,
     name: "rest-relay-cache-capacity"
   .}: uint32
@@ -597,17 +575,12 @@ type WakuNodeConf* = object
   .}: bool
 
   dnsDiscoveryUrl* {.
-    desc: "URL for DNS node list in format 'enrtree://@'",
+    desc:
+      "URL for DNS node list in format 'enrtree://@', enables DNS Discovery",
     defaultValue: "",
     name: "dns-discovery-url"
   .}: string
 
-  dnsDiscoveryNameServers* {.
-    desc: "DNS name server IPs to query. Argument may be repeated.",
-    defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
-    name: "dns-discovery-name-server"
-  .}: seq[IpAddress]
-
   ## Discovery v5 config
   discv5Discovery* {.
     desc: "Enable discovering nodes via Node Discovery v5.",
@@ -657,12 +630,6 @@ type WakuNodeConf* = object
     name: "discv5-bits-per-hop"
   .}: int
 
-  discv5Only* {.
-    desc: "Disable all protocols other than discv5",
-    defaultValue: false,
-    name: "discv5-only"
-  .}: bool
-
   ## waku peer exchange config
   peerExchange* {.
     desc: "Enable waku peer exchange protocol (responder side): true|false",
@@ -904,7 +871,7 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] =
 proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
   RlnKeystoreGeneratorConf(
     execute: n.execute,
-    chainId: n.rlnRelayChainId,
+    chainId: UInt256.fromBytesBE(n.rlnRelayChainId.toBytesBE()),
     ethClientUrls: n.ethClientUrls.mapIt(string(it)),
     ethContractAddress: n.rlnRelayEthContractAddress,
     userMessageLimit: n.rlnRelayUserMessageLimit,
@@ -916,9 +883,9 @@ proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
 proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf =
   return InspectRlnDbConf(treePath: n.treePath)
 
-proc toClusterConf(
+proc toNetworkConf(
     preset: string, clusterId: Option[uint16]
-): ConfResult[Option[ClusterConf]] =
+): ConfResult[Option[NetworkConf]] =
   var lcPreset = toLowerAscii(preset)
   if clusterId.isSome() and clusterId.get() == 1:
     warn(
@@ -928,9 +895,9 @@ proc toClusterConf(
 
   case lcPreset
   of "":
-    ok(none(ClusterConf))
+    ok(none(NetworkConf))
   of "twn":
-    ok(some(ClusterConf.TheWakuNetworkConf()))
+    ok(some(NetworkConf.TheWakuNetworkConf()))
   else:
     err("Invalid --preset value passed: " & lcPreset)
@@ -967,11 +934,11 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
   b.withProtectedShards(n.protectedShards)
   b.withClusterId(n.clusterId)
 
-  let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr:
+  let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr:
     return err("Error determining cluster from preset: " & $error)
 
-  if clusterConf.isSome():
-    b.withClusterConf(clusterConf.get())
+  if networkConf.isSome():
+    b.withNetworkConf(networkConf.get())
 
   b.withAgentString(n.agentString)
@@ -997,7 +964,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
     b.withPeerStoreCapacity(n.peerStoreCapacity.get())
   b.withPeerPersistence(n.peerPersistence)
-  b.withDnsAddrs(n.dnsAddrs)
   b.withDnsAddrsNameServers(n.dnsAddrsNameServers)
   b.withDns4DomainName(n.dns4DomainName)
   b.withCircuitRelayClient(n.isRelayClient)
@@ -1005,12 +971,18 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
   b.withRelayPeerExchange(n.relayPeerExchange)
   b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
   b.withStaticNodes(n.staticNodes)
-  b.withKeepAlive(n.keepAlive)
 
   if n.numShardsInNetwork != 0:
-    b.withNumShardsInNetwork(n.numShardsInNetwork)
+    b.withNumShardsInCluster(n.numShardsInNetwork)
+    b.withShardingConf(AutoSharding)
+  else:
+    b.withShardingConf(StaticSharding)
+
+  # It is not possible to pass an empty sequence on the CLI
+  # If this is empty, it means the user did not specify any shards
+  if n.shards.len != 0:
+    b.withSubscribeShards(n.shards)
 
-  b.withShards(n.shards)
   b.withContentTopics(n.contentTopics)
 
   b.storeServiceConf.withEnabled(n.store)
@@ -1057,9 +1029,9 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
     b.metricsServerConf.withHttpPort(n.metricsServerPort)
     b.metricsServerConf.withLogging(n.metricsLogging)
 
-  b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
-  b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
-  b.dnsDiscoveryConf.withNameServers(n.dnsDiscoveryNameServers)
+  if n.dnsDiscoveryUrl != "":
+    b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
+    b.dnsDiscoveryConf.withNameServers(n.dnsAddrsNameServers)
 
   if n.discv5Discovery.isSome():
     b.discv5Conf.withEnabled(n.discv5Discovery.get())
@@ -1070,7 +1042,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
   b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit)
   b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit)
   b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop)
-  b.discv5Conf.withDiscv5Only(n.discv5Only)
 
   b.withPeerExchange(n.peerExchange)
@@ -1082,7 +1053,7 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
     b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
     b.webSocketConf.withCertPath(n.websocketSecureCertPath)
 
-  b.withRateLimits(n.rateLimits)
+  b.rateLimitConf.withRateLimits(n.rateLimits)
 
   # Setup eligibility configuration
   b.eligibilityConf.withEnabled(n.eligibilityEnabled)
diff --git a/waku/factory/internal_config.nim b/waku/factory/internal_config.nim
index 72af28340..9fc3602a0 100644
--- a/waku/factory/internal_config.nim
+++ b/waku/factory/internal_config.nim
@@ -6,13 +6,7 @@ import
   libp2p/nameresolving/dnsresolver,
   std/[options, sequtils, net],
   results
-import
-  ../common/utils/nat,
-  ../node/net_config,
-  ../waku_enr,
-  ../waku_core,
-  ./waku_conf,
-  ./network_conf
+import ../common/utils/nat, ../node/net_config, ../waku_enr, ../waku_core, ./waku_conf
 
 proc enrConfiguration*(
     conf: WakuConf, netConfig: NetConfig
@@ -29,7 +23,7 @@ proc enrConfiguration*(
   enrBuilder.withMultiaddrs(netConfig.enrMultiaddrs)
 
   enrBuilder.withWakuRelaySharding(
-    RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
+    RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
   ).isOkOr:
     return err("could not initialize ENR with shards")
 
@@ -64,7 +58,7 @@ proc dnsResolve*(
 # TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init
 proc networkConfiguration*(
     clusterId: uint16,
-    conf: NetworkConfig,
+    conf: EndpointConf,
     discv5Conf: Option[Discv5Conf],
     webSocketConf: Option[WebSocketConf],
     wakuFlags: CapabilitiesBitfield,
@@ -139,14 +133,7 @@ proc networkConfiguration*(
     dns4DomainName = conf.dns4DomainName,
     discv5UdpPort = discv5UdpPort,
     wakuFlags = some(wakuFlags),
+    dnsNameServers = dnsAddrsNameServers,
   )
 
   return netConfigRes
-
-# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
-proc getNumShardsInNetwork*(conf: WakuConf): uint32 =
-  if conf.numShardsInNetwork != 0:
-    return conf.numShardsInNetwork
-  # If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec
-  # https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
-  return uint32(MaxShardIndex + 1)
diff --git a/waku/factory/network_conf.nim b/waku/factory/network_conf.nim
deleted file mode 100644
index c5179e53a..000000000
--- a/waku/factory/network_conf.nim
+++ /dev/null
@@ -1,34 +0,0 @@
-import std/[net, options, strutils]
-import libp2p/multiaddress
-
-type WebSocketSecureConf* {.requiresInit.} = object
-  keyPath*: string
-  certPath*: string
-
-type WebSocketConf* = object
-  port*: Port
-  secureConf*: Option[WebSocketSecureConf]
-
-type NetworkConf* = object
-  natStrategy*: string # TODO: make enum
-  p2pTcpPort*: Port
-  dns4DomainName*: Option[string]
-  p2pListenAddress*: IpAddress
-  extMultiAddrs*: seq[MultiAddress]
-  extMultiAddrsOnly*: bool
-  webSocketConf*: Option[WebSocketConf]
-
-proc validateNoEmptyStrings(networkConf: NetworkConf): Result[void, string] =
-  if networkConf.dns4DomainName.isSome() and
-      isEmptyOrWhiteSpace(networkConf.dns4DomainName.get().string):
-    return err("dns4DomainName is an empty string, set it to none(string) instead")
-
-  if networkConf.webSocketConf.isSome() and
-      networkConf.webSocketConf.get().secureConf.isSome():
-    let secureConf = networkConf.webSocketConf.get().secureConf.get()
-    if isEmptyOrWhiteSpace(secureConf.keyPath):
-      return err("websocket.secureConf.keyPath is an empty string")
-    if isEmptyOrWhiteSpace(secureConf.certPath):
-      return err("websocket.secureConf.certPath is an empty string")
-
-  return ok()
diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim
index aceada3fe..c7193aa9c 100644
--- a/waku/factory/networks_config.nim
+++ b/waku/factory/networks_config.nim
@@ -1,35 +1,50 @@
 {.push raises: [].}
 
-# TODO: Rename this type to match file name
+import chronicles, results, stint
 
-type ClusterConf* = object
+logScope:
+  topics = "waku networks conf"
+
+type
+  ShardingConfKind* = enum
+    AutoSharding
+    StaticSharding
+
+  ShardingConf* = object
+    case kind*: ShardingConfKind
+    of AutoSharding:
+      numShardsInCluster*: uint16
+    of StaticSharding:
+      discard
+
+type NetworkConf* = object
   maxMessageSize*: string # TODO: static convert to a uint64
   clusterId*: uint16
   rlnRelay*: bool
   rlnRelayEthContractAddress*: string
-  rlnRelayChainId*: uint
+  rlnRelayChainId*: UInt256
   rlnRelayDynamic*: bool
   rlnEpochSizeSec*: uint64
   rlnRelayUserMessageLimit*: uint64
-  # TODO: should be uint16 like the `shards` parameter
-  numShardsInNetwork*: uint32
+  shardingConf*: ShardingConf
   discv5Discovery*: bool
   discv5BootstrapNodes*: seq[string]
 
 # cluster-id=1 (aka The Waku Network)
 # Cluster configuration corresponding to The Waku Network. Note that it
 # overrides existing cli configuration
-proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
-  return ClusterConf(
+proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
+  const RelayChainId = 59141'u256
+  return NetworkConf(
     maxMessageSize: "150KiB",
     clusterId: 1,
     rlnRelay: true,
-    rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
+    rlnRelayEthContractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6",
    rlnRelayDynamic: true,
-    rlnRelayChainId: 11155111,
+    rlnRelayChainId: RelayChainId,
     rlnEpochSizeSec: 600,
     rlnRelayUserMessageLimit: 100,
-    numShardsInNetwork: 8,
+    shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8),
     discv5Discovery: true,
     discv5BootstrapNodes:
       @[
@@ -38,3 +53,21 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
        "enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
       ],
   )
+
+proc validateShards*(
+    shardingConf: ShardingConf, shards: seq[uint16]
+): Result[void, string] =
+  case shardingConf.kind
+  of StaticSharding:
+    return ok()
+  of AutoSharding:
+    let numShardsInCluster = shardingConf.numShardsInCluster
+    for shard in shards:
+      if shard >= numShardsInCluster:
+        let msg =
+          "validateShards invalid shard: " & $shard & " when numShardsInCluster: " &
+          $numShardsInCluster
+        error "validateShards failed", error = msg
+        return err(msg)
+
+    return ok()
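Note: a small sketch (not part of the diff) of how the new ShardingConf/validateShards pair above is expected to behave; the module path and imports are assumptions, the types and proc come from this change.

import results
import waku/factory/networks_config  # assumed module path

let conf = ShardingConf(kind: AutoSharding, numShardsInCluster: 8)
doAssert conf.validateShards(@[0'u16, 3, 7]).isOk()
doAssert conf.validateShards(@[8'u16]).isErr()   # shard index >= numShardsInCluster
doAssert ShardingConf(kind: StaticSharding).validateShards(@[42'u16]).isOk()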
diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim
index 9080b755e..e1f5fbf0a 100644
--- a/waku/factory/node_factory.nim
+++ b/waku/factory/node_factory.nim
@@ -10,6 +10,7 @@ import
 import
   ./internal_config,
+  ./networks_config,
   ./waku_conf,
   ./builder,
   ./validator_signed,
@@ -68,17 +69,6 @@ proc initNode(
   ## file. Optionally include persistent peer storage.
   ## No protocols are mounted yet.
 
-  var dnsResolver: DnsResolver
-  if conf.dnsAddrs:
-    # Support for DNS multiaddrs
-    var nameServers: seq[TransportAddress]
-    for ip in conf.dnsAddrsNameServers:
-      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
-
-    dnsResolver = DnsResolver.new(nameServers)
-
-  var node: WakuNode
-
   let pStorage =
     if peerStore.isNone():
       nil
@@ -92,6 +82,9 @@ proc initNode(
     else:
       (none(string), none(string))
 
+  let nameResolver =
+    DnsResolver.new(conf.dnsAddrsNameServers.mapIt(initTAddress(it, Port(53))))
+
   # Build waku node instance
   var builder = WakuNodeBuilder.init()
   builder.withRng(rng)
@@ -103,7 +96,7 @@
     maxConnections = some(conf.maxConnections.int),
     secureKey = secureKey,
     secureCert = secureCert,
-    nameResolver = dnsResolver,
+    nameResolver = nameResolver,
     sendSignedPeerRecord = conf.relayPeerExchange,
       # We send our own signed peer record when peer exchange enabled
     agentString = some(conf.agentString),
@@ -116,6 +109,7 @@
     else:
       false
 
+<<<<<<< HEAD
   let reputationEnabled =
    if conf.reputationConf.isSome():
       conf.reputationConf.get().enabled
@@ -144,9 +138,24 @@ proc initNode(
     reputationEnabled = reputationEnabled,
   )
   builder.withRateLimit(conf.rateLimits)
+=======
+  builder.withPeerManagerConfig(
+    maxConnections = conf.maxConnections,
+    relayServiceRatio = $relayRatio & ":" & $serviceRatio,
+    shardAware = conf.relayShardedPeerManagement,
+  )
+  error "maxRelayPeers is deprecated. It is recommended to use relayServiceRatio instead. If relayServiceRatio is not set, it will be automatically calculated based on maxConnections and maxRelayPeers."
+  else:
+    builder.withPeerManagerConfig(
+      maxConnections = conf.maxConnections,
+      relayServiceRatio = conf.relayServiceRatio,
+      shardAware = conf.relayShardedPeerManagement,
+    )
+  builder.withRateLimit(conf.rateLimit)
+>>>>>>> master
   builder.withCircuitRelay(relay)
 
-  node =
+  let node =
     ?builder.build().mapErr(
       proc(err: string): string =
         "failed to create waku node instance: " & err
@@ -159,10 +168,12 @@ proc initNode(
 proc getAutoshards*(
     node: WakuNode, contentTopics: seq[string]
 ): Result[seq[RelayShard], string] =
+  if node.wakuAutoSharding.isNone():
+    return err("Static sharding used, cannot get shards from content topics")
   var autoShards: seq[RelayShard]
   for contentTopic in contentTopics:
-    let shard = node.wakuSharding.getShard(contentTopic).valueOr:
-      return err("Could not parse content topic: " & error)
+    let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
+      return err("Could not parse content topic: " & error)
     autoShards.add(shard)
   return ok(autoshards)
@@ -173,10 +184,6 @@ proc setupProtocols(
   ## Optionally include persistent message storage.
   ## No protocols are started yet.
 
-  if conf.discv5Conf.isSome() and conf.discv5Conf.get().discv5Only:
-    notice "Running node only with Discv5, not mounting additional protocols"
-    return ok()
-
   node.mountMetadata(conf.clusterId).isOkOr:
     return err("failed to mount waku metadata protocol: " & error)
@@ -284,16 +291,11 @@ proc setupProtocols(
   if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
     node.setupStoreResume()
 
-  # If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork
-  let numShardsInNetwork = getNumShardsInNetwork(conf)
-
-  if conf.numShardsInNetwork == 0:
-    warn "Number of shards in network not configured, setting it to",
-      # TODO: If not configured, it mounts 1024 shards! Make it a mandatory configuration instead
-      numShardsInNetwork = $numShardsInNetwork
-
-  node.mountSharding(conf.clusterId, numShardsInNetwork).isOkOr:
-    return err("failed to mount waku sharding: " & error)
+  if conf.shardingConf.kind == AutoSharding:
+    node.mountAutoSharding(conf.clusterId, conf.shardingConf.numShardsInCluster).isOkOr:
+      return err("failed to mount waku auto sharding: " & error)
+  else:
+    warn("Auto sharding is disabled")
 
   # Mount relay on all nodes
   var peerExchangeHandler = none(RoutingRecordsHandler)
@@ -316,14 +318,22 @@ proc setupProtocols(
 
     peerExchangeHandler = some(handlePeerExchange)
 
-  let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
-    return err("Could not get autoshards: " & error)
+  # TODO: when using autosharding, the user should not be expected to pass any shards, but only content topics
+  # Hence, this joint logic should be removed in favour of an either logic:
+  # use passed shards (static) or deduce shards from content topics (auto)
+  let autoShards =
+    if node.wakuAutoSharding.isSome():
+      node.getAutoshards(conf.contentTopics).valueOr:
+        return err("Could not get autoshards: " & error)
+    else:
+      @[]
 
   debug "Shards created from content topics",
     contentTopics = conf.contentTopics, shards = autoShards
 
-  let confShards =
-    conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
+  let confShards = conf.subscribeShards.mapIt(
+    RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
+  )
   let shards = confShards & autoShards
 
   if conf.relay:
@@ -331,10 +341,7 @@ proc setupProtocols(
     (
       await mountRelay(
-        node,
-        shards,
-        peerExchangeHandler = peerExchangeHandler,
-        int(conf.maxMessageSizeBytes),
+        node, peerExchangeHandler = peerExchangeHandler, int(conf.maxMessageSizeBytes)
       )
     ).isOkOr:
       return err("failed to mount waku relay protocol: " & $error)
@@ -342,7 +349,7 @@ proc setupProtocols(
     # Add validation keys to protected topics
     var subscribedProtectedShards: seq[ProtectedShard]
     for shardKey in conf.protectedShards:
-      if shardKey.shard notin conf.shards:
+      if shardKey.shard notin conf.subscribeShards:
         warn "protected shard not in subscribed shards, skipping adding validator",
           protectedShard = shardKey.shard, subscribedShards = shards
         continue
@@ -476,7 +483,7 @@ proc startNode*(
   ## Connect to static nodes and start
   ## keep-alive, if configured.
-  # Start Waku v2 node
+  info "Running nwaku node", version = git_version
   try:
     await node.start()
   except CatchableError:
@@ -512,10 +519,6 @@ proc startNode*(
   if conf.peerExchange and not conf.discv5Conf.isSome():
     node.startPeerExchangeLoop()
 
-  # Start keepalive, if enabled
-  if conf.keepAlive:
-    node.startKeepalive()
-
   # Maintain relay connections
   if conf.relay:
     node.peerManager.start()
@@ -526,7 +529,7 @@ proc setupNode*(
     wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
 ): Result[WakuNode, string] =
   let netConfig = networkConfiguration(
-    wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf,
+    wakuConf.clusterId, wakuConf.endpointConf, wakuConf.discv5Conf,
     wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
     wakuConf.portsShift, clientId,
   ).valueOr:
diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim
index 01dc7a36f..137e3531c 100644
--- a/waku/factory/waku.nim
+++ b/waku/factory/waku.nim
@@ -1,7 +1,7 @@
 {.push raises: [].}
 
 import
-  std/[options, sequtils],
+  std/[options, sequtils, strformat],
   results,
   chronicles,
   chronos,
@@ -26,9 +26,11 @@ import
   ../waku_node,
   ../node/peer_manager,
   ../node/health_monitor,
+  ../node/waku_metrics,
   ../node/delivery_monitor/delivery_monitor,
   ../waku_api/message_cache,
   ../waku_api/rest/server,
+  ../waku_api/rest/builder as rest_server_builder,
   ../waku_archive,
   ../waku_relay/protocol,
   ../discovery/waku_dnsdisc,
@@ -66,6 +68,8 @@ type Waku* = ref object
 
   node*: WakuNode
 
+  healthMonitor*: NodeHealthMonitor
+
   deliveryMonitor: DeliveryMonitor
 
   restServer*: WakuRestServerRef
@@ -123,15 +127,24 @@ proc setupAppCallbacks(
     if node.wakuRelay.isNil():
       return err("Cannot configure relayHandler callback without Relay mounted")
 
-    let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
-      return err("Could not get autoshards: " & error)
+    let autoShards =
+      if node.wakuAutoSharding.isSome():
+        node.getAutoshards(conf.contentTopics).valueOr:
+          return err("Could not get autoshards: " & error)
+      else:
+        @[]
 
-    let confShards =
-      conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
+    let confShards = conf.subscribeShards.mapIt(
+      RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
+    )
     let shards = confShards & autoShards
 
-    for shard in shards:
-      discard node.wakuRelay.subscribe($shard, appCallbacks.relayHandler)
+    let uniqueShards = deduplicate(shards)
+
+    for shard in uniqueShards:
+      let topic = $shard
+      node.subscribe((kind: PubsubSub, topic: topic), appCallbacks.relayHandler).isOkOr:
+        return err(fmt"Could not subscribe {topic}: " & $error)
 
   if not appCallbacks.topicHealthChangeHandler.isNil():
     if node.wakuRelay.isNil():
@@ -155,19 +168,33 @@ proc new*(
   logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
 
   ?wakuConf.validate()
-  wakuConf.logConf()
 
-  info "Running nwaku node", version = git_version
+  let healthMonitor = NodeHealthMonitor.new(wakuConf.dnsAddrsNameServers)
+
+  let restServer: WakuRestServerRef =
+    if wakuConf.restServerConf.isSome():
+      let restServer = startRestServerEssentials(
+        healthMonitor, wakuConf.restServerConf.get(), wakuConf.portsShift
+      ).valueOr:
+        error "Starting essential REST server failed", error = $error
+        return err("Failed to start essential REST server in Waku.new: " & $error)
+
+      restServer
+    else:
+      nil
 
   var relay = newCircuitRelay(wakuConf.circuitRelayClient)
 
-  let nodeRes = setupNode(wakuConf, rng, relay)
-  if nodeRes.isErr():
-    error "Failed setting up node", error = nodeRes.error
-    return err("Failed setting up node: " & nodeRes.error)
+  let node = setupNode(wakuConf, rng, relay).valueOr:
+    error "Failed setting up node", error = $error
+    return err("Failed setting up node: " & $error)
 
-  let node = nodeRes.get()
+  healthMonitor.setNodeToHealthMonitor(node)
+  healthMonitor.onlineMonitor.setPeerStoreToOnlineMonitor(node.switch.peerStore)
+  healthMonitor.onlineMonitor.addOnlineStateObserver(
+    node.peerManager.getOnlineStateObserver()
+  )
 
   node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr:
     error "Failed setting up app callbacks", error = error
@@ -193,8 +220,10 @@ proc new*(
     rng: rng,
     key: wakuConf.nodeKey,
     node: node,
+    healthMonitor: healthMonitor,
     deliveryMonitor: deliveryMonitor,
     appCallbacks: appCallbacks,
+    restServer: restServer,
   )
 
   waku.setupSwitchServices(wakuConf, relay, rng)
@@ -225,14 +254,14 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
     return err("Could not retrieve ports: " & error)
 
   if tcpPort.isSome():
-    conf.networkConf.p2pTcpPort = tcpPort.get()
+    conf.endpointConf.p2pTcpPort = tcpPort.get()
 
   if websocketPort.isSome() and conf.webSocketConf.isSome():
     conf.webSocketConf.get().port = websocketPort.get()
 
   # Rebuild NetConfig with bound port values
   let netConf = networkConfiguration(
-    conf.clusterId, conf.networkConf, conf.discv5Conf, conf.webSocketConf,
+    conf.clusterId, conf.endpointConf, conf.discv5Conf, conf.webSocketConf,
     conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
   ).valueOr:
     return err("Could not update NetConfig: " & error)
@@ -282,7 +311,7 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
 proc updateWaku(waku: ptr Waku): Result[void, string] =
   let conf = waku[].conf
-  if conf.networkConf.p2pTcpPort == Port(0) or
+  if conf.endpointConf.p2pTcpPort == Port(0) or
       (conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)):
     updateEnr(waku).isOkOr:
       return err("error calling updateEnr: " & $error)
@@ -330,15 +359,6 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
         error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()
       return
 
-# The network connectivity loop checks periodically whether the node is online or not
-# and triggers any change that depends on the network connectivity state
-proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} =
-  while true:
-    await sleepAsync(15.seconds)
-
-    # Update online state
-    await waku.node.peerManager.updateOnlineState()
-
 proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
   debug "Retrieve dynamic bootstrap nodes"
   let conf = waku[].conf
@@ -357,16 +377,15 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
   else:
     waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
 
-  if conf.discv5Conf.isNone or not conf.discv5Conf.get().discv5Only:
-    (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
-      return err("error while calling startNode: " & $error)
+  (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
+    return err("error while calling startNode: " & $error)
 
-    # Update waku data that is set dynamically on node start
-    updateWaku(waku).isOkOr:
-      return err("Error in updateApp: " & $error)
+  ## Update waku data that is set dynamically on node start
+  updateWaku(waku).isOkOr:
+    return err("Error in updateApp: " & $error)
 
   ## Discv5
-  if conf.discv5Conf.isSome:
+  if conf.discv5Conf.isSome():
     waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5(
       waku.node.enr,
      waku.node.peerManager,
ptr Waku): Future[Result[void, string]] {.async.} = waku.dynamicBootstrapNodes, waku.rng, conf.nodeKey, - conf.networkConf.p2pListenAddress, + conf.endpointConf.p2pListenAddress, conf.portsShift, ) @@ -386,23 +405,42 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = if not waku[].deliveryMonitor.isNil(): waku[].deliveryMonitor.startDeliveryMonitor() - # Start network connectivity check loop - waku[].networkConnLoopHandle = waku[].startNetworkConnectivityLoop() + ## Health Monitor + waku[].healthMonitor.startHealthMonitor().isOkOr: + return err("failed to start health monitor: " & $error) + + if conf.restServerConf.isSome(): + rest_server_builder.startRestServerProtocolSupport( + waku[].restServer, + waku[].node, + waku[].wakuDiscv5, + conf.restServerConf.get(), + conf.relay, + conf.lightPush, + conf.clusterId, + conf.subscribeShards, + conf.contentTopics, + ).isOkOr: + return err ("Starting protocols support REST server failed: " & $error) + + if conf.metricsServerConf.isSome(): + waku[].metricsServer = waku_metrics.startMetricsServerAndLogging( + conf.metricsServerConf.get(), conf.portsShift + ).valueOr: + return err("Starting monitoring and external interfaces failed: " & error) + + waku[].healthMonitor.setOverallHealth(HealthStatus.READY) return ok() -# Waku shutdown - proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} = - if not waku.restServer.isNil(): - await waku.restServer.stop() + ## Waku shutdown + + waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) if not waku.metricsServer.isNil(): await waku.metricsServer.stop() - if not waku.networkConnLoopHandle.isNil(): - await waku.networkConnLoopHandle.cancelAndWait() - if not waku.wakuDiscv5.isNil(): await waku.wakuDiscv5.stop() @@ -411,3 +449,9 @@ proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} = if not waku.dnsRetryLoopHandle.isNil(): await waku.dnsRetryLoopHandle.cancelAndWait() + + if not waku.healthMonitor.isNil(): + await waku.healthMonitor.stopHealthMonitor() + + if not waku.restServer.isNil(): + await waku.restServer.stop() diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim index af115260e..eec712a51 100644 --- a/waku/factory/waku_conf.nim +++ b/waku/factory/waku_conf.nim @@ -12,14 +12,23 @@ import ../discovery/waku_discv5, ../node/waku_metrics, ../common/logging, + ../common/rate_limit/setting, ../waku_enr/capabilities, - ./network_conf + ./networks_config export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf logScope: topics = "waku conf" +type WebSocketSecureConf* {.requiresInit.} = object + keyPath*: string + certPath*: string + +type WebSocketConf* = object + port*: Port + secureConf*: Option[WebSocketSecureConf] + # TODO: should be defined in validator_signed.nim and imported here type ProtectedShard* {.requiresInit.} = object shard*: uint16 @@ -50,7 +59,7 @@ type FilterServiceConf* {.requiresInit.} = object subscriptionTimeout*: uint16 maxCriteria*: uint32 -type NetworkConfig* = object # TODO: make enum +type EndpointConf* = object # TODO: make enum natStrategy*: string p2pTcpPort*: Port dns4DomainName*: Option[string] @@ -77,11 +86,10 @@ type WakuConf* {.requiresInit.} = ref object nodeKey*: crypto.PrivateKey clusterId*: uint16 - shards*: seq[uint16] + subscribeShards*: seq[uint16] protectedShards*: seq[ProtectedShard] - # TODO: move to an autoShardingConf - numShardsInNetwork*: uint32 + shardingConf*: ShardingConf contentTopics*: seq[string] relay*: bool @@ -92,7 +100,6 @@ type WakuConf* 
{.requiresInit.} = ref object
   relayPeerExchange*: bool
   rendezvous*: bool
   circuitRelayClient*: bool
-  keepAlive*: bool

   discv5Conf*: Option[Discv5Conf]
   dnsDiscoveryConf*: Option[DnsDiscoveryConf]
@@ -106,9 +113,8 @@ type WakuConf* {.requiresInit.} = ref object
   reputationConf*: Option[ReputationConf]

   portsShift*: uint16
-  dnsAddrs*: bool
   dnsAddrsNameServers*: seq[IpAddress]
-  networkConf*: NetworkConfig
+  endpointConf*: EndpointConf
   wakuFlags*: CapabilitiesBitfield

   # TODO: could probably make it a `PeerRemoteInfo`
@@ -133,8 +139,7 @@ type WakuConf* {.requiresInit.} = ref object

   colocationLimit*: int

-  # TODO: use proper type
-  rateLimits*: seq[string]
+  rateLimit*: ProtocolRateLimitSettings

   # TODO: those could be in a relay conf object
   maxRelayPeers*: Option[int]
@@ -155,8 +160,13 @@ proc logConf*(wakuConf: WakuConf) =
   info "Configuration. Network", cluster = wakuConf.clusterId

-  for shard in wakuConf.shards:
-    info "Configuration. Shards", shard = shard
+  for shard in wakuConf.subscribeShards:
+    info "Configuration. Active Relay Shards", shard = shard

   if wakuConf.discv5Conf.isSome():
     for i in wakuConf.discv5Conf.get().bootstrapNodes:
@@ -182,26 +192,9 @@ proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =
     return err("nodekey param is invalid")
   return ok()

-proc validateShards(wakuConf: WakuConf): Result[void, string] =
-  let numShardsInNetwork = wakuConf.numShardsInNetwork
-
-  # TODO: fix up this behaviour
-  if numShardsInNetwork == 0:
-    return ok()
-
-  for shard in wakuConf.shards:
-    if shard >= numShardsInNetwork:
-      let msg =
-        "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
-        $numShardsInNetwork # fmt doesn't work
-      error "validateShards failed", error = msg
-      return err(msg)
-
-  return ok()
-
 proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
-  if wakuConf.networkConf.dns4DomainName.isSome() and
-    isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string):
+  if wakuConf.endpointConf.dns4DomainName.isSome() and
+      isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get().string):
     return err("dns4-domain-name is an empty string, set it to none(string) instead")

   if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):
@@ -253,7 +246,7 @@ proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =

 proc validate*(wakuConf: WakuConf): Result[void, string] =
   ?wakuConf.validateNodeKey()
-  ?wakuConf.validateShards()
+  ?wakuConf.shardingConf.validateShards(wakuConf.subscribeShards)
   ?wakuConf.validateNoEmptyStrings()

   if wakuConf.eligibilityConf.isSome():
diff --git a/waku/node/health_monitor.nim b/waku/node/health_monitor.nim
index b3fe9b227..854a8bbc0 100644
--- a/waku/node/health_monitor.nim
+++ b/waku/node/health_monitor.nim
@@ -1,87 +1,4 @@
-{.push raises: [].}
+import
+  health_monitor/[node_health_monitor, protocol_health, online_monitor, health_status]

-import std/[options], chronos
-
-import waku_node, ../waku_rln_relay
-
-type
-  HealthStatus* = enum
-    INITIALIZING
-    SYNCHRONIZING
-    READY
-    NOT_READY
-    NOT_MOUNTED
-    SHUTTING_DOWN
-
-  ProtocolHealth* = object
-    protocol*: string
-    health*: HealthStatus
-
-  HealthReport* = object
-    nodeHealth*: HealthStatus
-    protocolsHealth*: seq[ProtocolHealth]
-
-  WakuNodeHealthMonitor* = ref object
-    nodeHealth: HealthStatus
-    node: Option[WakuNode]
-
-proc `$`*(t: HealthStatus): string =
-  result =
-    case t
-    of INITIALIZING: "Initializing"
-    of SYNCHRONIZING: "Synchronizing"
-    of READY: "Ready"
-    of NOT_READY: "Not Ready"
-    of NOT_MOUNTED: 
"Not Mounted" - of SHUTTING_DOWN: "Shutting Down" - -proc init*( - t: typedesc[HealthStatus], strRep: string -): HealthStatus {.raises: [ValueError].} = - case strRep - of "Initializing": - return HealthStatus.INITIALIZING - of "Synchronizing": - return HealthStatus.SYNCHRONIZING - of "Ready": - return HealthStatus.READY - of "Not Ready": - return HealthStatus.NOT_READY - of "Not Mounted": - return HealthStatus.NOT_MOUNTED - of "Shutting Down": - return HealthStatus.SHUTTING_DOWN - else: - raise newException(ValueError, "Invalid HealthStatus string representation") - -const FutIsReadyTimout = 5.seconds - -proc getNodeHealthReport*(hm: WakuNodeHealthMonitor): Future[HealthReport] {.async.} = - result.nodeHealth = hm.nodeHealth - - if hm.node.isSome() and hm.node.get().wakuRlnRelay != nil: - let getRlnRelayHealth = proc(): Future[HealthStatus] {.async.} = - let isReadyStateFut = hm.node.get().wakuRlnRelay.isReady() - if not await isReadyStateFut.withTimeout(FutIsReadyTimout): - return HealthStatus.NOT_READY - - try: - if not isReadyStateFut.completed(): - return HealthStatus.NOT_READY - elif isReadyStateFut.read(): - return HealthStatus.READY - - return HealthStatus.SYNCHRONIZING - except: - error "exception reading state: " & getCurrentExceptionMsg() - return HealthStatus.NOT_READY - - result.protocolsHealth.add( - ProtocolHealth(protocol: "Rln Relay", health: await getRlnRelayHealth()) - ) - -proc setNode*(hm: WakuNodeHealthMonitor, node: WakuNode) = - hm.node = some(node) - -proc setOverallHealth*(hm: WakuNodeHealthMonitor, health: HealthStatus) = - hm.nodeHealth = health +export node_health_monitor, protocol_health, online_monitor, health_status diff --git a/waku/node/health_monitor/health_status.nim b/waku/node/health_monitor/health_status.nim new file mode 100644 index 000000000..4dd2bdd9a --- /dev/null +++ b/waku/node/health_monitor/health_status.nim @@ -0,0 +1,16 @@ +import results, std/strutils + +type HealthStatus* {.pure.} = enum + INITIALIZING + SYNCHRONIZING + READY + NOT_READY + NOT_MOUNTED + SHUTTING_DOWN + +proc init*(t: typedesc[HealthStatus], strRep: string): Result[HealthStatus, string] = + try: + let status = parseEnum[HealthStatus](strRep) + return ok(status) + except ValueError: + return err("Invalid HealthStatus string representation: " & strRep) diff --git a/waku/node/health_monitor/node_health_monitor.nim b/waku/node/health_monitor/node_health_monitor.nim new file mode 100644 index 000000000..fa31c0529 --- /dev/null +++ b/waku/node/health_monitor/node_health_monitor.nim @@ -0,0 +1,422 @@ +{.push raises: [].} + +import + std/[options, sets, strformat, random, sequtils], + chronos, + chronicles, + libp2p/protocols/rendezvous + +import + ../waku_node, + ../../waku_rln_relay, + ../../waku_relay, + ../peer_manager, + ./online_monitor, + ./health_status, + ./protocol_health + +## This module is aimed to check the state of the "self" Waku Node + +# randomize initializes sdt/random's random number generator +# if not called, the outcome of randomization procedures will be the same in every run +randomize() + +type + HealthReport* = object + nodeHealth*: HealthStatus + protocolsHealth*: seq[ProtocolHealth] + + NodeHealthMonitor* = ref object + nodeHealth: HealthStatus + node: WakuNode + onlineMonitor*: OnlineMonitor + keepAliveFut: Future[void] + +template checkWakuNodeNotNil(node: WakuNode, p: ProtocolHealth): untyped = + if node.isNil(): + warn "WakuNode is not set, cannot check health", protocol_health_instance = $p + return p.notMounted() + +proc getRelayHealth(hm: 
NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Relay") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuRelay == nil: + return p.notMounted() + + let relayPeers = hm.node.wakuRelay.getConnectedPubSubPeers(pubsubTopic = "").valueOr: + return p.notMounted() + + if relayPeers.len() == 0: + return p.notReady("No connected peers") + + return p.ready() + +proc getRlnRelayHealth(hm: NodeHealthMonitor): Future[ProtocolHealth] {.async.} = + var p = ProtocolHealth.init("Rln Relay") + if hm.node.isNil(): + warn "WakuNode is not set, cannot check health", protocol_health_instance = $p + return p.notMounted() + + if hm.node.wakuRlnRelay.isNil(): + return p.notMounted() + + const FutIsReadyTimout = 5.seconds + + let isReadyStateFut = hm.node.wakuRlnRelay.isReady() + if not await isReadyStateFut.withTimeout(FutIsReadyTimout): + return p.notReady("Ready state check timed out") + + try: + if not isReadyStateFut.completed(): + return p.notReady("Ready state check timed out") + elif isReadyStateFut.read(): + return p.ready() + + return p.synchronizing() + except: + error "exception reading state: " & getCurrentExceptionMsg() + return p.notReady("State cannot be determined") + +proc getLightpushHealth( + hm: NodeHealthMonitor, relayHealth: HealthStatus +): ProtocolHealth = + var p = ProtocolHealth.init("Lightpush") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLightPush == nil: + return p.notMounted() + + if relayHealth == HealthStatus.READY: + return p.ready() + + return p.notReady("Node has no relay peers to fullfill push requests") + +proc getLightpushClientHealth( + hm: NodeHealthMonitor, relayHealth: HealthStatus +): ProtocolHealth = + var p = ProtocolHealth.init("Lightpush Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLightpushClient == nil: + return p.notMounted() + + let selfServiceAvailable = + hm.node.wakuLightPush != nil and relayHealth == HealthStatus.READY + let servicePeerAvailable = hm.node.peerManager.selectPeer(WakuLightPushCodec).isSome() + + if selfServiceAvailable or servicePeerAvailable: + return p.ready() + + return p.notReady("No Lightpush service peer available yet") + +proc getLegacyLightpushHealth( + hm: NodeHealthMonitor, relayHealth: HealthStatus +): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Lightpush") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyLightPush == nil: + return p.notMounted() + + if relayHealth == HealthStatus.READY: + return p.ready() + + return p.notReady("Node has no relay peers to fullfill push requests") + +proc getLegacyLightpushClientHealth( + hm: NodeHealthMonitor, relayHealth: HealthStatus +): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Lightpush Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyLightpushClient == nil: + return p.notMounted() + + if (hm.node.wakuLegacyLightPush != nil and relayHealth == HealthStatus.READY) or + hm.node.peerManager.selectPeer(WakuLegacyLightPushCodec).isSome(): + return p.ready() + + return p.notReady("No Lightpush service peer available yet") + +proc getFilterHealth(hm: NodeHealthMonitor, relayHealth: HealthStatus): ProtocolHealth = + var p = ProtocolHealth.init("Filter") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuFilter == nil: + return p.notMounted() + + if relayHealth == HealthStatus.READY: + return p.ready() + + return p.notReady("Relay is not ready, filter will not be able to sort out messages") + +proc getFilterClientHealth( + hm: NodeHealthMonitor, relayHealth: HealthStatus +): ProtocolHealth = + var p = 
ProtocolHealth.init("Filter Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuFilterClient == nil: + return p.notMounted() + + if hm.node.peerManager.selectPeer(WakuFilterSubscribeCodec).isSome(): + return p.ready() + + return p.notReady("No Filter service peer available yet") + +proc getStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Store") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuStore == nil: + return p.notMounted() + + return p.ready() + +proc getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Store Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuStoreClient == nil: + return p.notMounted() + + if hm.node.peerManager.selectPeer(WakuStoreCodec).isSome() or hm.node.wakuStore != nil: + return p.ready() + + return p.notReady( + "No Store service peer available yet, neither Store service set up for the node" + ) + +proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Store") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyStore == nil: + return p.notMounted() + + return p.ready() + +proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Store Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyStoreClient == nil: + return p.notMounted() + + if hm.node.peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or + hm.node.wakuLegacyStore != nil: + return p.ready() + + return p.notReady( + "No Legacy Store service peers are available yet, neither Store service set up for the node" + ) + +proc getPeerExchangeHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Peer Exchange") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuPeerExchange == nil: + return p.notMounted() + + return p.ready() + +proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Rendezvous") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuRendezvous == nil: + return p.notMounted() + + if hm.node.peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0: + return p.notReady("No Rendezvous peers are available yet") + + return p.ready() + +proc selectRandomPeersForKeepalive( + node: WakuNode, outPeers: seq[PeerId], numRandomPeers: int +): Future[seq[PeerId]] {.async.} = + ## Select peers for random keepalive, prioritizing mesh peers + + if node.wakuRelay.isNil(): + return selectRandomPeers(outPeers, numRandomPeers) + + let meshPeers = node.wakuRelay.getPeersInMesh().valueOr: + error "Failed getting peers in mesh for ping", error = error + # Fallback to random selection from all outgoing peers + return selectRandomPeers(outPeers, numRandomPeers) + + trace "Mesh peers for keepalive", meshPeers = meshPeers + + # Get non-mesh peers and shuffle them + var nonMeshPeers = outPeers.filterIt(it notin meshPeers) + shuffle(nonMeshPeers) + + # Combine mesh peers + random non-mesh peers up to numRandomPeers total + let numNonMeshPeers = max(0, numRandomPeers - len(meshPeers)) + let selectedNonMeshPeers = nonMeshPeers[0 ..< min(len(nonMeshPeers), numNonMeshPeers)] + + let selectedPeers = meshPeers & selectedNonMeshPeers + trace "Selected peers for keepalive", selected = selectedPeers + return selectedPeers + +proc keepAliveLoop( + node: WakuNode, + randomPeersKeepalive: chronos.Duration, + allPeersKeepAlive: chronos.Duration, + numRandomPeers = 10, +) {.async.} = + # Calculate how many random peer cycles before pinging 
all peers + let randomToAllRatio = + int(allPeersKeepAlive.seconds() / randomPeersKeepalive.seconds()) + var countdownToPingAll = max(0, randomToAllRatio - 1) + + # Sleep detection configuration + let sleepDetectionInterval = 3 * randomPeersKeepalive + + # Failure tracking + var consecutiveIterationFailures = 0 + const maxAllowedConsecutiveFailures = 2 + + var lastTimeExecuted = Moment.now() + + while true: + trace "Running keepalive loop" + await sleepAsync(randomPeersKeepalive) + + if not node.started: + continue + + let currentTime = Moment.now() + + # Check for sleep detection + if currentTime - lastTimeExecuted > sleepDetectionInterval: + warn "Keep alive hasn't been executed recently. Killing all connections" + await node.peerManager.disconnectAllPeers() + lastTimeExecuted = currentTime + consecutiveIterationFailures = 0 + continue + + # Check for consecutive failures + if consecutiveIterationFailures > maxAllowedConsecutiveFailures: + warn "Too many consecutive ping failures, node likely disconnected. Killing all connections", + consecutiveIterationFailures, maxAllowedConsecutiveFailures + await node.peerManager.disconnectAllPeers() + consecutiveIterationFailures = 0 + lastTimeExecuted = currentTime + continue + + # Determine which peers to ping + let outPeers = node.peerManager.connectedPeers()[1] + let peersToPing = + if countdownToPingAll > 0: + await selectRandomPeersForKeepalive(node, outPeers, numRandomPeers) + else: + outPeers + + let numPeersToPing = len(peersToPing) + + if countdownToPingAll > 0: + trace "Pinging random peers", + count = numPeersToPing, countdownToPingAll = countdownToPingAll + countdownToPingAll.dec() + else: + trace "Pinging all peers", count = numPeersToPing + countdownToPingAll = max(0, randomToAllRatio - 1) + + # Execute keepalive pings + let successfulPings = await parallelPings(node, peersToPing) + + if successfulPings != numPeersToPing: + waku_node_errors.inc( + amount = numPeersToPing - successfulPings, labelValues = ["keep_alive_failure"] + ) + + trace "Keepalive results", + attemptedPings = numPeersToPing, successfulPings = successfulPings + + # Update failure tracking + if numPeersToPing > 0 and successfulPings == 0: + consecutiveIterationFailures.inc() + error "All pings failed", consecutiveFailures = consecutiveIterationFailures + else: + consecutiveIterationFailures = 0 + + lastTimeExecuted = currentTime + +# 2 minutes default - 20% of the default chronosstream timeout duration +proc startKeepalive*( + hm: NodeHealthMonitor, + randomPeersKeepalive = 10.seconds, + allPeersKeepalive = 2.minutes, +): Result[void, string] = + # Validate input parameters + if randomPeersKeepalive.isZero() or allPeersKeepAlive.isZero(): + error "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0", + randomPeersKeepalive = $randomPeersKeepalive, + allPeersKeepAlive = $allPeersKeepAlive + return err( + "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0" + ) + + if allPeersKeepAlive < randomPeersKeepalive: + error "startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive", + allPeersKeepAlive = $allPeersKeepAlive, + randomPeersKeepalive = $randomPeersKeepalive + return + err("startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive") + + info "starting keepalive", + randomPeersKeepalive = randomPeersKeepalive, allPeersKeepalive = allPeersKeepalive + + hm.keepAliveFut = hm.node.keepAliveLoop(randomPeersKeepalive, allPeersKeepalive) + return ok() + +proc 
getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} = + var report: HealthReport + report.nodeHealth = hm.nodeHealth + + if not hm.node.isNil(): + let relayHealth = hm.getRelayHealth() + report.protocolsHealth.add(relayHealth) + report.protocolsHealth.add(await hm.getRlnRelayHealth()) + report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getStoreHealth()) + report.protocolsHealth.add(hm.getLegacyStoreHealth()) + report.protocolsHealth.add(hm.getPeerExchangeHealth()) + report.protocolsHealth.add(hm.getRendezvousHealth()) + + report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getStoreClientHealth()) + report.protocolsHealth.add(hm.getLegacyStoreClientHealth()) + report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health)) + return report + +proc setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) = + hm.node = node + +proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) = + hm.nodeHealth = health + +proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] = + hm.onlineMonitor.startOnlineMonitor() + hm.startKeepalive().isOkOr: + return err("startHealthMonitor: failed starting keep alive: " & error) + return ok() + +proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} = + await hm.onlineMonitor.stopOnlineMonitor() + await hm.keepAliveFut.cancelAndWait() + +proc new*( + T: type NodeHealthMonitor, + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], +): T = + T( + nodeHealth: INITIALIZING, + node: nil, + onlineMonitor: OnlineMonitor.init(dnsNameServers), + ) diff --git a/waku/node/health_monitor/online_monitor.nim b/waku/node/health_monitor/online_monitor.nim new file mode 100644 index 000000000..27bd53bc3 --- /dev/null +++ b/waku/node/health_monitor/online_monitor.nim @@ -0,0 +1,77 @@ +import std/sequtils +import chronos, chronicles, libp2p/nameresolving/dnsresolver, libp2p/peerstore + +import ../peer_manager/waku_peer_store, waku/waku_core/peers + +type + OnOnlineStateChange* = proc(online: bool) {.gcsafe, raises: [].} + + OnlineMonitor* = ref object + onOnlineStateChange: OnOnlineStateChange + dnsNameServers*: seq[IpAddress] + onlineStateObservers: seq[OnOnlineStateChange] + networkConnLoopHandle: Future[void] # node: WakuNode + peerStore: PeerStore + online: bool + +proc checkInternetConnectivity( + nameServerIps: seq[IpAddress], timeout = 2.seconds +): Future[bool] {.async.} = + const DNSCheckDomain = "one.one.one.one" + let nameServers = nameServerIps.mapIt(initTAddress(it, Port(53))) + let dnsResolver = DnsResolver.new(nameServers) + + # Resolve domain IP + let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC) + if resolved.len > 0: + return true + else: + return false + +proc updateOnlineState(self: OnlineMonitor) {.async.} = + if self.onlineStateObservers.len == 0: + trace "No online state observers registered, cannot notify about online state change" + return + + let numConnectedPeers = + if self.peerStore.isNil(): + 0 + else: + self.peerStore.peers().countIt(it.connectedness == Connected) + + self.online = + if numConnectedPeers > 0: + true + else: + await checkInternetConnectivity(self.dnsNameServers) + + for 
onlineStateObserver in self.onlineStateObservers: + onlineStateObserver(self.online) + +proc networkConnectivityLoop(self: OnlineMonitor): Future[void] {.async.} = + ## Checks periodically whether the node is online or not + ## and triggers any change that depends on the network connectivity state + while true: + await self.updateOnlineState() + await sleepAsync(5.seconds) + +proc startOnlineMonitor*(self: OnlineMonitor) = + self.networkConnLoopHandle = self.networkConnectivityLoop() + +proc stopOnlineMonitor*(self: OnlineMonitor) {.async.} = + if not self.networkConnLoopHandle.isNil(): + await self.networkConnLoopHandle.cancelAndWait() + +proc setPeerStoreToOnlineMonitor*(self: OnlineMonitor, peerStore: PeerStore) = + self.peerStore = peerStore + +proc addOnlineStateObserver*(self: OnlineMonitor, observer: OnOnlineStateChange) = + ## Adds an observer that will be called when the online state changes + if observer notin self.onlineStateObservers: + self.onlineStateObservers.add(observer) + +proc amIOnline*(self: OnlineMonitor): bool = + return self.online + +proc init*(T: type OnlineMonitor, dnsNameServers: seq[IpAddress]): OnlineMonitor = + T(dnsNameServers: dnsNameServers, onlineStateObservers: @[]) diff --git a/waku/node/health_monitor/protocol_health.nim b/waku/node/health_monitor/protocol_health.nim new file mode 100644 index 000000000..7bacea94b --- /dev/null +++ b/waku/node/health_monitor/protocol_health.nim @@ -0,0 +1,46 @@ +import std/[options, strformat] +import ./health_status + +type ProtocolHealth* = object + protocol*: string + health*: HealthStatus + desc*: Option[string] ## describes why a certain protocol is considered `NOT_READY` + +proc notReady*(p: var ProtocolHealth, desc: string): ProtocolHealth = + p.health = HealthStatus.NOT_READY + p.desc = some(desc) + return p + +proc ready*(p: var ProtocolHealth): ProtocolHealth = + p.health = HealthStatus.READY + p.desc = none[string]() + return p + +proc notMounted*(p: var ProtocolHealth): ProtocolHealth = + p.health = HealthStatus.NOT_MOUNTED + p.desc = none[string]() + return p + +proc synchronizing*(p: var ProtocolHealth): ProtocolHealth = + p.health = HealthStatus.SYNCHRONIZING + p.desc = none[string]() + return p + +proc initializing*(p: var ProtocolHealth): ProtocolHealth = + p.health = HealthStatus.INITIALIZING + p.desc = none[string]() + return p + +proc shuttingDown*(p: var ProtocolHealth): ProtocolHealth = + p.health = HealthStatus.SHUTTING_DOWN + p.desc = none[string]() + return p + +proc `$`*(p: ProtocolHealth): string = + return fmt"protocol: {p.protocol}, health: {p.health}, description: {p.desc}" + +proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth = + let p = ProtocolHealth( + protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]() + ) + return p diff --git a/waku/node/net_config.nim b/waku/node/net_config.nim index a45d95f92..4802694c4 100644 --- a/waku/node/net_config.nim +++ b/waku/node/net_config.nim @@ -78,7 +78,7 @@ proc init*( discv5UdpPort = none(Port), clusterId: uint16 = 0, wakuFlags = none(CapabilitiesBitfield), - dnsNameServers = newSeq[IpAddress](), + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], ): NetConfigResult = ## Initialize and validate waku node network configuration diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 6990c38ae..dba823a45 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -8,7 +8,6 @@ import 
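
Before the peer manager changes that follow, here is a usage sketch of the new `OnlineMonitor` API introduced above. The proc signatures and the default resolver addresses are taken from this diff; the `waku/node/health_monitor/online_monitor` import path, the `demo` proc and the timings are assumptions made for illustration only, not part of the change set.

```nim
# Illustrative sketch: registering an observer on the new OnlineMonitor.
# Only procs defined in this diff are used; the wiring shown is hypothetical.
import std/net
import chronos
import waku/node/health_monitor/online_monitor

proc demo() {.async.} =
  let monitor = OnlineMonitor.init(
    @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
  )

  var lastOnline = false
  monitor.addOnlineStateObserver(
    proc(online: bool) {.gcsafe, raises: [].} =
      lastOnline = online
  )

  # Starts the periodic connectivity loop: connected peers first, DNS check as fallback.
  monitor.startOnlineMonitor()
  await sleepAsync(6.seconds)
  echo "online: ", monitor.amIOnline(), " (observer saw: ", lastOnline, ")"
  await monitor.stopOnlineMonitor()

when isMainModule:
  waitFor demo()
```

The peer manager consumes the same observer hook through `getOnlineStateObserver()` in the hunks below, which is how `pm.online` is now kept up to date without the peer manager running its own DNS checks.
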
  libp2p/multistream,
  libp2p/muxers/muxer,
  libp2p/nameresolving/nameresolver,
-  libp2p/nameresolving/dnsresolver,
  libp2p/peerstore

import
@@ -21,6 +20,7 @@ import
  ../../waku_enr/sharding,
  ../../waku_enr/capabilities,
  ../../waku_metadata,
+  ../health_monitor/online_monitor,
  ./peer_store/peer_storage,
  ./waku_peer_store,
  ../../incentivization/[reputation_manager, eligibility_manager]
@@ -75,8 +75,6 @@ const
  # Max peers that we allow from the same IP
  DefaultColocationLimit* = 5

-  DNSCheckDomain = "one.one.one.one"
-
type ConnectionChangeHandler* = proc(
  peerId: PeerId, peerEvent: PeerEventKind
): Future[void] {.gcsafe, raises: [Defect].}
@@ -99,20 +97,21 @@ type PeerManager* = ref object of RootObj
    started: bool
    shardedPeerManagement: bool # temp feature flag
    onConnectionChange*: ConnectionChangeHandler
    # clients of light protocols (like Lightpush) may track servers' reputation
    reputationManager*: Option[ReputationManager]
    # servers of light protocols (like Lightpush) may track client requests' eligibility
    eligibilityManager*: Option[EligibilityManager]
-    dnsNameServers*: seq[IpAddress]
-    online: bool
+    online: bool ## state managed by online_monitor module

#~~~~~~~~~~~~~~~~~~~#
# Helper Functions  #
#~~~~~~~~~~~~~~~~~~~#

-template isOnline*(self: PeerManager): bool =
-  self.online
-
proc calculateBackoff(
  initialBackoffInSec: int, backoffFactor: int, failedAttempts: int
): timer.Duration =
@@ -544,6 +543,13 @@ proc connectedPeers*(

  return (inPeers, outPeers)

+proc disconnectAllPeers*(pm: PeerManager) {.async.} =
+  let (inPeerIds, outPeerIds) = pm.connectedPeers()
+  let connectedPeers = concat(inPeerIds, outPeerIds)
+
+  let futs = connectedPeers.mapIt(pm.disconnectNode(it))
+  await allFutures(futs)
+
proc getStreamByPeerIdAndProtocol*(
  pm: PeerManager, peerId: PeerId, protocol: string
): Future[Result[Connection, string]] {.async.} =
@@ -580,35 +586,9 @@ proc getStreamByPeerIdAndProtocol*(

  return ok(streamRes.get())

-proc checkInternetConnectivity(
-  nameServerIps: seq[IpAddress], timeout = 2.seconds
-): Future[bool] {.async.} =
-  var nameServers: seq[TransportAddress]
-  for ip in nameServerIps:
-    nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
-
-  let dnsResolver = DnsResolver.new(nameServers)
-
-  # Resolve domain IP
-  let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 0.Port, Domain.AF_UNSPEC)
-
-  if resolved.len > 0:
-    return true
-  else:
-    return false
-
-proc updateOnlineState*(pm: PeerManager) {.async.} =
-  let numConnectedPeers =
-    pm.switch.peerStore.peers().countIt(it.connectedness == Connected)
-
-  if numConnectedPeers > 0:
-    pm.online = true
-  else:
-    pm.online = await checkInternetConnectivity(pm.dnsNameServers)
-
proc connectToRelayPeers*(pm: PeerManager) {.async.} =
  # only attempt if current node is online
-  if not pm.isOnline():
+  if not pm.online:
    error "connectToRelayPeers: won't attempt new connections - node is offline"
    return

@@ -776,6 +756,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
        debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip
        asyncSpawn(pm.switch.disconnect(peerId))
        peerStore.delete(peerId)
+
    if not pm.onConnectionChange.isNil():
      # we don't want to await for the callback to finish
      asyncSpawn pm.onConnectionChange(peerId, Joined)
@@ -790,6 +771,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
        if pm.ipTable[ip].len == 0:
          pm.ipTable.del(ip)
        break
+
    if not pm.onConnectionChange.isNil():
      # we don't want to await for the callback to finish
      asyncSpawn pm.onConnectionChange(peerId, Left)
@@ -846,6 +828,10 @@ proc logAndMetrics(pm: PeerManager) {.async.} =
          protoStreamsOut.float64, labelValues = [$Direction.Out, proto]
        )

+proc getOnlineStateObserver*(pm: PeerManager): OnOnlineStateChange =
+  return proc(online: bool) {.gcsafe, raises: [].} =
+    pm.online = online
+
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Pruning and Maintenance (Stale Peers Management)    #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
@@ -854,7 +840,7 @@ proc manageRelayPeers*(pm: PeerManager) {.async.} =
  if pm.wakuMetadata.shards.len == 0:
    return

-  if not pm.isOnline():
+  if not pm.online:
    error "manageRelayPeers: won't attempt new connections - node is offline"
    return

@@ -1085,9 +1071,12 @@ proc new*(
    maxFailedAttempts = MaxFailedAttempts,
    colocationLimit = DefaultColocationLimit,
    shardedPeerManagement = false,
    reputationEnabled = false,
    eligibilityEnabled = false,
-    dnsNameServers = newSeq[IpAddress](),
): PeerManager {.gcsafe.} =
  let capacity = switch.peerStore.capacity
  let maxConnections = switch.connManager.inSema.size
@@ -1138,12 +1127,16 @@ proc new*(
    maxFailedAttempts: maxFailedAttempts,
    colocationLimit: colocationLimit,
    shardedPeerManagement: shardedPeerManagement,
-    dnsNameServers: dnsNameServers,
    online: true,
  )

-  proc peerHook(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
-    onPeerEvent(pm, peerId, event)
+  proc peerHook(
+      peerId: PeerId, event: PeerEvent
+  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
+    try:
+      await onPeerEvent(pm, peerId, event)
+    except CatchableError:
+      error "exception in onPeerEvent", error = getCurrentExceptionMsg()

  var peerStore = pm.switch.peerStore

diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim
index 7cc313844..d2dd3acb2 100644
--- a/waku/node/waku_node.nim
+++ b/waku/node/waku_node.nim
@@ -1,7 +1,7 @@
{.push raises: [].}

import
-  std/[hashes, options, sugar, tables, strutils, sequtils, os, net],
+  std/[hashes, options, sugar, tables, strutils, sequtils, os, net, random],
  chronos,
  chronicles,
  metrics,
@@ -70,6 +70,10 @@ declarePublicGauge waku_px_peers,
logScope:
  topics = "waku node"

+# randomize initializes std/random's random number generator
+# if not called, the outcome of randomization procedures will be the same in every run
+randomize()
+
# TODO: Move to application instance (e.g., `WakuNode2`)
# Git version in git describe format (defined compile time)
const git_version* {.strdefine.} = "n/a"
@@ -109,7 +113,7 @@ type
    wakuLightpushClient*: WakuLightPushClient
    wakuPeerExchange*: WakuPeerExchange
    wakuMetadata*: WakuMetadata
-    wakuSharding*: Sharding
+    wakuAutoSharding*: Option[Sharding]
    enr*: enr.Record
    libp2pPing*: Ping
    rng*: ref rand.HmacDrbgContext
@@ -117,7 +121,6 @@ type
    announcedAddresses*: seq[MultiAddress]
    started*: bool # Indicates that node has started listening
    topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent]
-    contentTopicHandlers: Table[ContentTopic, TopicHandler]
    rateLimitSettings*: ProtocolRateLimitSettings

proc new*(
@@ -126,6 +129,7 @@ proc new*(
    enr: enr.Record,
    switch: Switch,
    peerManager: PeerManager,
+    rateLimitSettings: ProtocolRateLimitSettings = DefaultProtocolRateLimit,
    # TODO: make this argument required after tests are updated
    rng: ref HmacDrbgContext = crypto.newRng(),
  ): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
@@ -142,7 +146,7 @@ proc new*(
    enr: enr,
    announcedAddresses: netConfig.announcedAddresses, 
topicSubscriptionQueue: queue, - rateLimitSettings: DefaultProtocolRateLimit, + rateLimitSettings: rateLimitSettings, ) return node @@ -196,12 +200,13 @@ proc mountMetadata*(node: WakuNode, clusterId: uint32): Result[void, string] = return ok() -## Waku Sharding -proc mountSharding*( +## Waku AutoSharding +proc mountAutoSharding*( node: WakuNode, clusterId: uint16, shardCount: uint32 ): Result[void, string] = - info "mounting sharding", clusterId = clusterId, shardCount = shardCount - node.wakuSharding = Sharding(clusterId: clusterId, shardCountGenZero: shardCount) + info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount + node.wakuAutoSharding = + some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount)) return ok() ## Waku Sync @@ -257,7 +262,13 @@ proc mountStoreSync*( ## Waku relay -proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = +proc registerRelayHandler( + node: WakuNode, topic: PubsubTopic, appHandler: WakuRelayHandler +) = + ## Registers the only handler for the given topic. + ## Notice that this handler internally calls other handlers, such as filter, + ## archive, etc, plus the handler provided by the application. + if node.wakuRelay.isSubscribed(topic): return @@ -290,18 +301,19 @@ proc registerRelayDefaultHandler(node: WakuNode, topic: PubsubTopic) = node.wakuStoreReconciliation.messageIngress(topic, msg) - let defaultHandler = proc( + let uniqueTopicHandler = proc( topic: PubsubTopic, msg: WakuMessage ): Future[void] {.async, gcsafe.} = await traceHandler(topic, msg) await filterHandler(topic, msg) await archiveHandler(topic, msg) await syncHandler(topic, msg) + await appHandler(topic, msg) - discard node.wakuRelay.subscribe(topic, defaultHandler) + node.wakuRelay.subscribe(topic, uniqueTopicHandler) proc subscribe*( - node: WakuNode, subscription: SubscriptionEvent, handler = none(WakuRelayHandler) + node: WakuNode, subscription: SubscriptionEvent, handler: WakuRelayHandler ): Result[void, string] = ## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on ## this topic. WakuRelayHandler is a method that takes a topic and a Waku message. @@ -313,32 +325,26 @@ proc subscribe*( let (pubsubTopic, contentTopicOp) = case subscription.kind of ContentSub: - let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: - error "Autosharding error", error = error - return err("Autosharding error: " & error) - - ($shard, some(subscription.topic)) + if node.wakuAutoSharding.isSome(): + let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return err("Autosharding error: " & error) + ($shard, some(subscription.topic)) + else: + return err( + "Static sharding is used, relay subscriptions must specify a pubsub topic" + ) of PubsubSub: (subscription.topic, none(ContentTopic)) else: return err("Unsupported subscription type in relay subscribe") if node.wakuRelay.isSubscribed(pubsubTopic): - debug "already subscribed to topic", pubsubTopic - return err("Already subscribed to topic: " & $pubsubTopic) - - if contentTopicOp.isSome() and node.contentTopicHandlers.hasKey(contentTopicOp.get()): - error "Invalid API call to `subscribe`. Was already subscribed" - return err("Invalid API call to `subscribe`. Was already subscribed") + warn "No-effect API call to subscribe. 
Already subscribed to topic", pubsubTopic + return ok() + node.registerRelayHandler(pubsubTopic, handler) node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) - node.registerRelayDefaultHandler(pubsubTopic) - - if handler.isSome(): - let wrappedHandler = node.wakuRelay.subscribe(pubsubTopic, handler.get()) - - if contentTopicOp.isSome(): - node.contentTopicHandlers[contentTopicOp.get()] = wrappedHandler return ok() @@ -354,32 +360,27 @@ proc unsubscribe*( let (pubsubTopic, contentTopicOp) = case subscription.kind of ContentUnsub: - let shard = node.wakuSharding.getShard((subscription.topic)).valueOr: - error "Autosharding error", error = error - return err("Autosharding error: " & error) - - ($shard, some(subscription.topic)) + if node.wakuAutoSharding.isSome(): + let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return err("Autosharding error: " & error) + ($shard, some(subscription.topic)) + else: + return err( + "Static sharding is used, relay subscriptions must specify a pubsub topic" + ) of PubsubUnsub: (subscription.topic, none(ContentTopic)) else: return err("Unsupported subscription type in relay unsubscribe") if not node.wakuRelay.isSubscribed(pubsubTopic): - error "Invalid API call to `unsubscribe`. Was not subscribed", pubsubTopic - return - err("Invalid API call to `unsubscribe`. Was not subscribed to: " & $pubsubTopic) + warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic + return ok() - if contentTopicOp.isSome(): - # Remove this handler only - var handler: TopicHandler - ## TODO: refactor this part. I think we can simplify it - if node.contentTopicHandlers.pop(contentTopicOp.get(), handler): - debug "unsubscribe", contentTopic = contentTopicOp.get() - node.wakuRelay.unsubscribe(pubsubTopic) - else: - debug "unsubscribe", pubsubTopic = pubsubTopic - node.wakuRelay.unsubscribe(pubsubTopic) - node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) + debug "unsubscribe", pubsubTopic, contentTopicOp + node.wakuRelay.unsubscribe(pubsubTopic) + node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) return ok() @@ -398,9 +399,10 @@ proc publish*( return err(msg) let pubsubTopic = pubsubTopicOp.valueOr: - node.wakuSharding.getShard(message.contentTopic).valueOr: + if node.wakuAutoSharding.isNone(): + return err("Pubsub topic must be specified when static sharding is enabled.") + node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr: let msg = "Autosharding error: " & error - error "publish error", err = msg return err(msg) #TODO instead of discard return error when 0 peers received the message @@ -441,7 +443,6 @@ proc startRelay*(node: WakuNode) {.async.} = proc mountRelay*( node: WakuNode, - shards: seq[RelayShard] = @[], peerExchangeHandler = none(RoutingRecordsHandler), maxMessageSize = int(DefaultMaxWakuMessageSize), ): Future[Result[void, string]] {.async.} = @@ -467,16 +468,7 @@ proc mountRelay*( node.switch.mount(node.wakuRelay, protocolMatcher(WakuRelayCodec)) - ## Make sure we don't have duplicates - let uniqueShards = deduplicate(shards) - - # Subscribe to shards - for shard in uniqueShards: - node.subscribe((kind: PubsubSub, topic: $shard)).isOkOr: - error "failed to subscribe to shard", error = error - return err("failed to subscribe to shard in mountRelay: " & error) - - info "relay mounted successfully", shards = uniqueShards + info "relay mounted successfully" return ok() ## Waku filter @@ -584,8 
+576,14 @@ proc filterSubscribe*( waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) return subRes + elif node.wakuAutoSharding.isNone(): + error "Failed filter subscription, pubsub topic must be specified with static sharding" + waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) else: - let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics) + # No pubsub topic, autosharding is used to deduce it + # but content topics must be well-formed for this + let topicMapRes = + node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) let topicMap = if topicMapRes.isErr(): @@ -595,11 +593,11 @@ proc filterSubscribe*( topicMapRes.get() var futures = collect(newSeq): - for pubsub, topics in topicMap.pairs: + for shard, topics in topicMap.pairs: info "registering filter subscription to content", - pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId + shard = shard, contentTopics = topics, peer = remotePeer.peerId let content = topics.mapIt($it) - node.wakuFilterClient.subscribe(remotePeer, $pubsub, content) + node.wakuFilterClient.subscribe(remotePeer, $shard, content) var subRes: FilterSubscribeResult = FilterSubscribeResult.ok() try: @@ -663,8 +661,12 @@ proc filterUnsubscribe*( waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) return unsubRes + elif node.wakuAutoSharding.isNone(): + error "Failed filter un-subscription, pubsub topic must be specified with static sharding" + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) else: # pubsubTopic.isNone - let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics) + let topicMapRes = + node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) let topicMap = if topicMapRes.isErr(): @@ -674,11 +676,11 @@ proc filterUnsubscribe*( topicMapRes.get() var futures = collect(newSeq): - for pubsub, topics in topicMap.pairs: + for shard, topics in topicMap.pairs: info "deregistering filter subscription to content", - pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId + shard = shard, contentTopics = topics, peer = remotePeer.peerId let content = topics.mapIt($it) - node.wakuFilterClient.unsubscribe(remotePeer, $pubsub, content) + node.wakuFilterClient.unsubscribe(remotePeer, $shard, content) var unsubRes: FilterSubscribeResult = FilterSubscribeResult.ok() try: @@ -1084,7 +1086,10 @@ proc legacyLightpushPublish*( if pubsubTopic.isSome(): return await internalPublish(node, pubsubTopic.get(), message, peer) - let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, message.contentTopic) + if node.wakuAutoSharding.isNone(): + return err("Pubsub topic must be specified when static sharding is enabled") + let topicMapRes = + node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic) let topicMap = if topicMapRes.isErr(): @@ -1140,7 +1145,7 @@ proc mountLightPush*( lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) node.wakuLightPush = WakuLightPush.new( - node.peerManager, node.rng, pushHandler, node.wakuSharding, some(rateLimit) + node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit) ) if node.started: @@ -1191,7 +1196,9 @@ proc lightpushPublish*( ): Future[lightpush_protocol.WakuLightPushResult] {.async.} = if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil(): error "failed to publish message as lightpush not available" - return lighpushErrorResult(SERVICE_NOT_AVAILABLE, "Waku lightpush not available") + return 
lighpushErrorResult(
+      LightPushErrorCode.SERVICE_NOT_AVAILABLE, "Waku lightpush not available"
+    )

  let toPeer: RemotePeerInfo = peerOpt.valueOr:
    if not node.wakuLightPush.isNil():
@@ -1199,21 +1206,33 @@ proc lightpushPublish*(
    elif not node.wakuLightpushClient.isNil():
      node.peerManager.selectPeer(WakuLightPushCodec).valueOr:
        let msg = "no suitable remote peers"
-        error "failed to publish message", err = msg
-        return lighpushErrorResult(NO_PEERS_TO_RELAY, msg)
+        error "failed to publish message", msg = msg
+        return lighpushErrorResult(LightPushErrorCode.NO_PEERS_TO_RELAY, msg)
    else:
-      return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
+      return lighpushErrorResult(
+        LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
+      )

  let pubsubForPublish = pubSubTopic.valueOr:
+    if node.wakuAutoSharding.isNone():
+      let msg = "Pubsub topic must be specified when static sharding is enabled"
+      error "lightpush publish error", error = msg
+      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)
+
    let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr:
      let msg = "Invalid content-topic:" & $error
      error "lightpush request handling error", error = msg
-      return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
+      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)

-    node.wakuSharding.getShard(parsedTopic).valueOr:
+    node.wakuAutoSharding.get().getShard(parsedTopic).valueOr:
      let msg = "Autosharding error: " & error
      error "lightpush publish error", error = msg
-      return lighpushErrorResult(INTERNAL_SERVER_ERROR, msg)
+      return lighpushErrorResult(LightPushErrorCode.INTERNAL_SERVER_ERROR, msg)

  debug "in lightpushPublish"
  debug "eligibilityProof: ", eligibilityProof
@@ -1356,35 +1375,60 @@ proc mountLibp2pPing*(node: WakuNode) {.async: (raises: []).} =
  except LPError:
    error "failed to mount libp2pPing", error = getCurrentExceptionMsg()

-# TODO: Move this logic to PeerManager
-proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
-  while true:
-    await sleepAsync(keepalive)
-    if not node.started:
+proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.async.} =
+  ## Ping a single peer and return the result
+
+  try:
+    # Establish a stream
+    let stream = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
+      error "pingPeer: failed dialing peer", peerId = peerId
+      return err("pingPeer failed dialing peer peerId: " & $peerId)
+    defer:
+      # Always close the stream
+      try:
+        await stream.close()
+      except CatchableError as e:
+        debug "Error closing ping connection", peerId = peerId, error = e.msg
+
+    # Perform ping
+    let pingDuration = await node.libp2pPing.ping(stream)
+
+    trace "Ping successful", peerId = peerId, duration = pingDuration
+    return ok()
+  except CatchableError as e:
+    error "pingPeer: exception raised pinging peer", peerId = peerId, error = e.msg
+    return err("pingPeer: exception raised pinging peer: " & e.msg)
+
+proc selectRandomPeers*(peers: seq[PeerId], numRandomPeers: int): seq[PeerId] =
+  var randomPeers = peers
+  shuffle(randomPeers)
+  return randomPeers[0 ..< min(len(randomPeers), numRandomPeers)]
+
+# Returns the number of successful pings performed
+proc parallelPings*(node: WakuNode, peerIds: seq[PeerId]): Future[int] {.async.} =
+  if len(peerIds) == 0:
+    return 0
+
+  var pingFuts: seq[Future[Result[void, string]]]
+
+  # Create ping futures for each peer
+  for i, peerId in peerIds:
+    let fut = pingPeer(node, peerId)
+    pingFuts.add(fut)
+
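
The `parallelPings` helper being added here replaces the old sequential keepalive loop: it fires one ping future per peer, bounds the whole batch with a single timeout, and counts only futures that completed without failing. A self-contained sketch of that pattern follows; `dummyPing` and `countSuccesses` are illustrative names, not nwaku APIs, and the bool result stands in for the real `Result[void, string]`.

```nim
# Illustrative sketch of the "launch all, wait once, count completions" pattern.
import chronos

proc dummyPing(delay: Duration, ok: bool): Future[bool] {.async.} =
  ## Stand-in for pingPeer: takes some time, then reports success or failure.
  await sleepAsync(delay)
  return ok

proc countSuccesses(futs: seq[Future[bool]]): Future[int] {.async.} =
  # Bound the whole batch by one timeout, as parallelPings does.
  discard await allFutures(futs).withTimeout(5.seconds)
  var successCount = 0
  for fut in futs:
    if not fut.completed() or fut.failed():
      continue # still pending (timed out) or raised: not a success
    if fut.read():
      successCount.inc()
  return successCount

when isMainModule:
  let futs = @[
    dummyPing(100.milliseconds, true),
    dummyPing(200.milliseconds, false),
    dummyPing(10.seconds, true), # still pending when the 5 s timeout fires
  ]
  let n = waitFor countSuccesses(futs)
  echo "successful pings: ", n # prints 1
```

The same counting feeds the keepalive loop's failure tracking above: a batch where every ping fails increments the consecutive-failure counter, which eventually triggers `disconnectAllPeers`.
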
+ # Wait for all pings to complete + discard await allFutures(pingFuts).withTimeout(5.seconds) + + var successCount = 0 + for fut in pingFuts: + if not fut.completed() or fut.failed(): continue - # Keep connected peers alive while running - # Each node is responsible of keeping its outgoing connections alive - trace "Running keepalive" + let res = fut.read() + if res.isOk(): + successCount.inc() - # First get a list of connected peer infos - let outPeers = node.peerManager.connectedPeers()[1] - - for peerId in outPeers: - try: - let conn = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr: - warn "Failed dialing peer for keep alive", peerId = peerId - continue - let pingDelay = await node.libp2pPing.ping(conn) - await conn.close() - except CatchableError as exc: - waku_node_errors.inc(labelValues = ["keep_alive_failure"]) - -# 2 minutes default - 20% of the default chronosstream timeout duration -proc startKeepalive*(node: WakuNode, keepalive = 2.minutes) = - info "starting keepalive", keepalive = keepalive - - asyncSpawn node.keepaliveLoop(keepalive) + return successCount proc mountRendezvous*(node: WakuNode) {.async: (raises: []).} = info "mounting rendezvous discovery protocol" @@ -1483,7 +1527,7 @@ proc start*(node: WakuNode) {.async.} = ## with announced addrs after start let addressMapper = proc( listenAddrs: seq[MultiAddress] - ): Future[seq[MultiAddress]] {.async.} = + ): Future[seq[MultiAddress]] {.gcsafe, async: (raises: [CancelledError]).} = return node.announcedAddresses node.switch.peerInfo.addressMappers.add(addressMapper) @@ -1537,10 +1581,3 @@ proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} = return true return await node.wakuRlnRelay.isReady() ## TODO: add other protocol `isReady` checks - -proc setRateLimits*(node: WakuNode, limits: seq[string]): Result[void, string] = - let rateLimitConfig = ProtocolRateLimitSettings.parse(limits) - if rateLimitConfig.isErr(): - return err("invalid rate limit settings:" & rateLimitConfig.error) - node.rateLimitSettings = rateLimitConfig.get() - return ok() diff --git a/waku/node/waku_switch.nim b/waku/node/waku_switch.nim index 48d3612e3..cc99f46ae 100644 --- a/waku/node/waku_switch.nim +++ b/waku/node/waku_switch.nim @@ -20,7 +20,7 @@ const MaxConnectionsPerPeer* = 1 proc withWsTransport*(b: SwitchBuilder): SwitchBuilder = b.withTransport( - proc(upgr: Upgrade): Transport = + proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport = WsTransport.new(upgr) ) @@ -48,7 +48,7 @@ proc withWssTransport*( let key: TLSPrivateKey = getSecureKey(secureKeyPath) let cert: TLSCertificate = getSecureCert(secureCertPath) b.withTransport( - proc(upgr: Upgrade): Transport = + proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport = WsTransport.new( upgr, tlsPrivateKey = key, diff --git a/waku/waku_api/rest/admin/client.nim b/waku/waku_api/rest/admin/client.nim index 7d45544e2..87d46dd3d 100644 --- a/waku/waku_api/rest/admin/client.nim +++ b/waku/waku_api/rest/admin/client.nim @@ -62,6 +62,10 @@ proc getMeshPeersByShard*( rest, endpoint: "/admin/v1/peers/mesh/on/{shardId}", meth: HttpMethod.MethodGet .} +proc getPeersStats*(): RestResponse[PeerStats] {. + rest, endpoint: "/admin/v1/peers/stats", meth: HttpMethod.MethodGet +.} + proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {. 
rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet .} diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index 9cf6ec131..04cc31010 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -3,6 +3,7 @@ import std/[sets, strformat, sequtils, tables], chronicles, + chronicles/topics_registry, json_serialization, presto/route, libp2p/[peerinfo, switch, peerid, protocols/pubsub/pubsubpeer] @@ -31,6 +32,8 @@ export types logScope: topics = "waku node rest admin api" +const ROUTE_ADMIN_V1_PEERS_STATS* = "/admin/v1/peers/stats" # provides peer statistics + const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}" @@ -46,6 +49,9 @@ const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}" const ROUTE_ADMIN_V1_FILTER_SUBS* = "/admin/v1/filter/subscriptions" +const ROUTE_ADMIN_V1_POST_LOG_LEVEL* = "/admin/v1/log-level/{logLevel}" + # sets the new log level for the node + type PeerProtocolTuple = tuple[ multiaddr: string, @@ -94,6 +100,40 @@ proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPe return peers +proc getRelayPeers(node: WakuNode): PeersOfShards = + var relayPeers: PeersOfShards = @[] + if not node.wakuRelay.isNil(): + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = error, topic = topic + continue + let pubsubPeers = + node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) + relayPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + return relayPeers + +proc getMeshPeers(node: WakuNode): PeersOfShards = + var meshPeers: PeersOfShards = @[] + if not node.wakuRelay.isNil(): + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = error, topic = topic + continue + let peers = + node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) + meshPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + return meshPeers + proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: let peers = populateAdminPeerInfoForAll(node) @@ -185,19 +225,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = "Error: Relay Protocol is not mounted to the node" ) - var relayPeers: PeersOfShards = @[] - for topic in node.wakuRelay.getSubscribedTopics(): - let relayShard = RelayShard.parse(topic).valueOr: - error "Invalid subscribed topic", error = error, topic = topic - continue - let pubsubPeers = - node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) - relayPeers.add( - PeersOfShard( - shard: relayShard.shardId, - peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)), - ) - ) + var relayPeers: PeersOfShards = getRelayPeers(node) let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr: error "An error occurred while building the json response: ", error = error @@ -213,13 +241,20 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = let shard = shardId.valueOr: return RestApiResponse.badRequest(fmt("Invalid 
shardId: {error}")) + if node.wakuMetadata.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Metadata Protocol is not mounted to the node" + ) + if node.wakuRelay.isNil(): return RestApiResponse.serviceUnavailable( "Error: Relay Protocol is not mounted to the node" ) - let topic = - toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard)) + # TODO: clusterId and shards should be uint16 across all codebase and probably be defined as a type + let topic = toPubsubTopic( + RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard) + ) let pubsubPeers = node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) let relayPeer = PeersOfShard( @@ -240,21 +275,9 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = "Error: Relay Protocol is not mounted to the node" ) - var relayPeers: PeersOfShards = @[] - for topic in node.wakuRelay.getSubscribedTopics(): - let relayShard = RelayShard.parse(topic).valueOr: - error "Invalid subscribed topic", error = error, topic = topic - continue - let peers = - node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) - relayPeers.add( - PeersOfShard( - shard: relayShard.shardId, - peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)), - ) - ) + var meshPeers: PeersOfShards = getMeshPeers(node) - let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr: + let resp = RestApiResponse.jsonResponse(meshPeers, status = Http200).valueOr: error "An error occurred while building the json response: ", error = error return RestApiResponse.internalServerError( fmt("An error occurred while building the json response: {error}") @@ -268,13 +291,19 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = let shard = shardId.valueOr: return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + if node.wakuMetadata.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Metadata Protocol is not mounted to the node" + ) + if node.wakuRelay.isNil(): return RestApiResponse.serviceUnavailable( "Error: Relay Protocol is not mounted to the node" ) - let topic = - toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard)) + let topic = toPubsubTopic( + RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard) + ) let peers = node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) let relayPeer = PeersOfShard( @@ -289,6 +318,75 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = return resp + router.api(MethodGet, ROUTE_ADMIN_V1_PEERS_STATS) do() -> RestApiResponse: + let peers = populateAdminPeerInfoForAll(node) + + var stats: PeerStats = initOrderedTable[string, OrderedTable[string, int]]() + + stats["Sum"] = {"Total peers": peers.len()}.toOrderedTable() + + # stats of connectedness + var connectednessStats = initOrderedTable[string, int]() + connectednessStats[$Connectedness.Connected] = + peers.countIt(it.connected == Connectedness.Connected) + connectednessStats[$Connectedness.NotConnected] = + peers.countIt(it.connected == Connectedness.NotConnected) + connectednessStats[$Connectedness.CannotConnect] = + peers.countIt(it.connected == Connectedness.CannotConnect) + connectednessStats[$Connectedness.CanConnect] = + peers.countIt(it.connected == Connectedness.CanConnect) + stats["By Connectedness"] = connectednessStats + + # stats of relay peers + var totalRelayPeers = 0 + stats["Relay peers"] = block: + let 
relayPeers = getRelayPeers(node)
+      var stat = initOrderedTable[string, int]()
+      for ps in relayPeers:
+        totalRelayPeers += ps.peers.len
+        stat[$ps.shard] = ps.peers.len
+      stat["Total relay peers"] = totalRelayPeers
+      stat
+
+    # stats of mesh peers
+    stats["Mesh peers"] = block:
+      let meshPeers = getMeshPeers(node)
+      var totalMeshPeers = 0
+      var stat = initOrderedTable[string, int]()
+      for ps in meshPeers:
+        totalMeshPeers += ps.peers.len
+        stat[$ps.shard] = ps.peers.len
+      stat["Total mesh peers"] = totalMeshPeers
+      stat
+
+    var protoStats = initOrderedTable[string, int]()
+    protoStats[WakuRelayCodec] = peers.countIt(it.protocols.contains(WakuRelayCodec))
+    protoStats[WakuFilterSubscribeCodec] =
+      peers.countIt(it.protocols.contains(WakuFilterSubscribeCodec))
+    protoStats[WakuFilterPushCodec] =
+      peers.countIt(it.protocols.contains(WakuFilterPushCodec))
+    protoStats[WakuStoreCodec] = peers.countIt(it.protocols.contains(WakuStoreCodec))
+    protoStats[WakuLegacyStoreCodec] =
+      peers.countIt(it.protocols.contains(WakuLegacyStoreCodec))
+    protoStats[WakuLightPushCodec] =
+      peers.countIt(it.protocols.contains(WakuLightPushCodec))
+    protoStats[WakuLegacyLightPushCodec] =
+      peers.countIt(it.protocols.contains(WakuLegacyLightPushCodec))
+    protoStats[WakuPeerExchangeCodec] =
+      peers.countIt(it.protocols.contains(WakuPeerExchangeCodec))
+    protoStats[WakuReconciliationCodec] =
+      peers.countIt(it.protocols.contains(WakuReconciliationCodec))
+
+    stats["By Protocols"] = protoStats
+
+    let resp = RestApiResponse.jsonResponse(stats, status = Http200).valueOr:
+      error "An error occurred while building the json response: ", error = error
+      return RestApiResponse.internalServerError(
+        fmt("An error occurred while building the json response: {error}")
+      )
+
+    return resp
+
 proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) =
   router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do(
     contentBody: Option[ContentBody]
@@ -337,7 +435,40 @@ proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode)
 
   return resp.get()
 
+proc installAdminV1PostLogLevelHandler(router: var RestRouter, node: WakuNode) =
+  router.api(MethodPost, ROUTE_ADMIN_V1_POST_LOG_LEVEL) do(
+    logLevel: string
+  ) -> RestApiResponse:
+    when runtimeFilteringEnabled:
+      if logLevel.isErr() or logLevel.value().isEmptyOrWhitespace():
+        return RestApiResponse.badRequest("Invalid log-level, it can’t be empty")
+
+      try:
+        let newLogLevel = parseEnum[LogLevel](logLevel.value().capitalizeAscii())
+
+        if newLogLevel < enabledLogLevel:
+          return RestApiResponse.badRequest(
+            fmt(
+              "Log level {newLogLevel} is lower than the minimum level ({enabledLogLevel}) this binary was compiled with."
+            )
+          )
+
+        setLogLevel(newLogLevel)
+      except ValueError:
+        return RestApiResponse.badRequest(
+          fmt(
+            "Invalid log-level: {logLevel.value()}. Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL"
+          )
+        )
+
+      return RestApiResponse.ok()
+    else:
+      return RestApiResponse.serviceUnavailable(
+        "Dynamic log level management is not enabled in this build. Please recompile with `-d:chronicles_runtime_filtering:on`."
+ ) + proc installAdminApiHandlers*(router: var RestRouter, node: WakuNode) = installAdminV1GetPeersHandler(router, node) installAdminV1PostPeersHandler(router, node) installAdminV1GetFilterSubsHandler(router, node) + installAdminV1PostLogLevelHandler(router, node) diff --git a/waku/waku_api/rest/admin/types.nim b/waku/waku_api/rest/admin/types.nim index 0c0786e3d..483acf8b8 100644 --- a/waku/waku_api/rest/admin/types.nim +++ b/waku/waku_api/rest/admin/types.nim @@ -35,6 +35,9 @@ type FilterSubscription* = object peerId*: string filterCriteria*: seq[FilterTopic] +type PeerStats* = OrderedTable[string, OrderedTable[string, int]] + # maps high level grouping to low level grouping of counters + #### Serialization and deserialization proc writeValue*( writer: var JsonWriter[RestJson], value: WakuPeer @@ -73,6 +76,23 @@ proc writeValue*( writer.writeField("filterCriteria", value.filterCriteria) writer.endRecord() +proc writeValue*( + writer: var JsonWriter[RestJson], value: OrderedTable[string, int] +) {.raises: [IOError].} = + writer.beginRecord() + for key, value in value.pairs: + writer.writeField(key, value) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], + value: OrderedTable[string, OrderedTable[string, int]], +) {.raises: [IOError].} = + writer.beginRecord() + for group, subTab in value.pairs: + writer.writeField(group, subTab) + writer.endRecord() + proc readValue*( reader: var JsonReader[RestJson], value: var WakuPeer ) {.gcsafe, raises: [SerializationError, IOError].} = @@ -238,6 +258,21 @@ proc readValue*( value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get()) +proc readValue*( + reader: var JsonReader[RestJson], value: var OrderedTable[string, int] +) {.gcsafe, raises: [SerializationError, IOError].} = + for fieldName in readObjectFields(reader): + let fieldValue = reader.readValue(int) + value[fieldName] = fieldValue + +proc readValue*( + reader: var JsonReader[RestJson], + value: var OrderedTable[string, OrderedTable[string, int]], +) {.gcsafe, raises: [SerializationError, IOError].} = + for fieldName in readObjectFields(reader): + let fieldValue = reader.readValue(OrderedTable[string, int]) + value[fieldName] = fieldValue + func `==`*(a, b: WakuPeer): bool {.inline.} = return a.multiaddr == b.multiaddr diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim index 6e880f5a3..eb514439f 100644 --- a/waku/waku_api/rest/builder.nim +++ b/waku/waku_api/rest/builder.nim @@ -40,7 +40,7 @@ type RestServerConf* = object relayCacheCapacity*: uint32 proc startRestServerEssentials*( - nodeHealthMonitor: WakuNodeHealthMonitor, conf: RestServerConf, portsShift: uint16 + nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16 ): Result[WakuRestServerRef, string] = let requestErrorHandler: RestRequestErrorHandler = proc( error: RestRequestError, request: HttpRequestRef @@ -139,8 +139,7 @@ proc startRestServerProtocolSupport*( if relayEnabled: ## This MessageCache is used, f.e., in js-waku<>nwaku interop tests. ## js-waku tests asks nwaku-docker through REST whether a message is properly received. 
- const RestRelayCacheCapacity = 50 - let cache = MessageCache.init(int(RestRelayCacheCapacity)) + let cache = MessageCache.init(int(conf.relayCacheCapacity)) let handler: WakuRelayHandler = messageCacheHandler(cache) @@ -148,21 +147,23 @@ proc startRestServerProtocolSupport*( let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard) cache.pubsubSubscribe(pubsubTopic) - ## TODO: remove this line. use observer-observable pattern - ## within waku_node::registerRelayDefaultHandler - discard node.wakuRelay.subscribe(pubsubTopic, handler) - - for contentTopic in contentTopics: - cache.contentSubscribe(contentTopic) - - let shard = node.wakuSharding.getShard(contentTopic).valueOr: - error "Autosharding error in REST", error = error + node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr: + error "Could not subscribe", pubsubTopic, error continue - let pubsubTopic = $shard - ## TODO: remove this line. use observer-observable pattern - ## within waku_node::registerRelayDefaultHandler - discard node.wakuRelay.subscribe(pubsubTopic, handler) + if node.wakuAutoSharding.isSome(): + # Only deduce pubsub topics to subscribe to from content topics if autosharding is enabled + for contentTopic in contentTopics: + cache.contentSubscribe(contentTopic) + + let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr: + error "Autosharding error in REST", error = error + continue + let pubsubTopic = $shard + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr: + error "Could not subscribe", pubsubTopic, error + continue installRelayApiHandlers(router, node, cache) else: diff --git a/waku/waku_api/rest/health/handlers.nim b/waku/waku_api/rest/health/handlers.nim index 48dad9276..aa6b1e925 100644 --- a/waku/waku_api/rest/health/handlers.nim +++ b/waku/waku_api/rest/health/handlers.nim @@ -11,7 +11,7 @@ const ROUTE_HEALTH* = "/health" const FutHealthReportTimeout = 5.seconds proc installHealthApiHandler*( - router: var RestRouter, nodeHealthMonitor: WakuNodeHealthMonitor + router: var RestRouter, nodeHealthMonitor: NodeHealthMonitor ) = router.api(MethodGet, ROUTE_HEALTH) do() -> RestApiResponse: let healthReportFut = nodeHealthMonitor.getNodeHealthReport() diff --git a/waku/waku_api/rest/health/types.nim b/waku/waku_api/rest/health/types.nim index ce58ab711..57f8b284c 100644 --- a/waku/waku_api/rest/health/types.nim +++ b/waku/waku_api/rest/health/types.nim @@ -1,5 +1,6 @@ {.push raises: [].} +import results import chronicles, json_serialization, json_serialization/std/options import ../../../waku_node, ../serdes @@ -10,26 +11,33 @@ proc writeValue*( ) {.raises: [IOError].} = writer.beginRecord() writer.writeField(value.protocol, $value.health) + writer.writeField("desc", value.desc) writer.endRecord() proc readValue*( reader: var JsonReader[RestJson], value: var ProtocolHealth ) {.gcsafe, raises: [SerializationError, IOError].} = - var health: HealthStatus - var fieldCount = 0 - + var protocol = none[string]() + var health = none[HealthStatus]() + var desc = none[string]() for fieldName in readObjectFields(reader): - if fieldCount > 0: - reader.raiseUnexpectedField("Too many fields", "ProtocolHealth") - fieldCount += 1 + if fieldName == "desc": + if desc.isSome(): + reader.raiseUnexpectedField("Multiple `desc` fields found", "ProtocolHealth") + desc = some(reader.readValue(string)) + else: + if protocol.isSome(): + reader.raiseUnexpectedField( + "Multiple `protocol` fields and value found", "ProtocolHealth" + ) - let fieldValue = reader.readValue(string) 
- try: - health = HealthStatus.init(fieldValue) - except ValueError: - reader.raiseUnexpectedValue("Invalid `health` value") + let fieldValue = reader.readValue(string) + let h = HealthStatus.init(fieldValue).valueOr: + reader.raiseUnexpectedValue("Invalid `health` value: " & $error) + health = some(h) + protocol = some(fieldName) - value = ProtocolHealth(protocol: fieldName, health: health) + value = ProtocolHealth(protocol: protocol.get(), health: health.get(), desc: desc) proc writeValue*( writer: var JsonWriter[RestJson], value: HealthReport @@ -53,10 +61,11 @@ proc readValue*( reader.raiseUnexpectedField( "Multiple `nodeHealth` fields found", "HealthReport" ) - try: - nodeHealth = some(HealthStatus.init(reader.readValue(string))) - except ValueError: - reader.raiseUnexpectedValue("Invalid `health` value") + + let health = HealthStatus.init(reader.readValue(string)).valueOr: + reader.raiseUnexpectedValue("Invalid `health` value: " & $error) + + nodeHealth = some(health) of "protocolsHealth": if protocolsHealth.isSome(): reader.raiseUnexpectedField( diff --git a/waku/waku_api/rest/lightpush/handlers.nim b/waku/waku_api/rest/lightpush/handlers.nim index 4ed98162a..9c178a804 100644 --- a/waku/waku_api/rest/lightpush/handlers.nim +++ b/waku/waku_api/rest/lightpush/handlers.nim @@ -33,7 +33,7 @@ const NoPeerNoneFoundError = "No suitable service peer & none discovered" proc useSelfHostedLightPush(node: WakuNode): bool = return node.wakuLightPush != nil and node.wakuLightPushClient == nil -proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode = +proc convertErrorKindToHttpStatus(statusCode: LightPushStatusCode): HttpCode = ## Lightpush status codes are matching HTTP status codes by design return toHttpCode(statusCode.int).get(Http500) diff --git a/waku/waku_api/rest/relay/handlers.nim b/waku/waku_api/rest/relay/handlers.nim index 252375208..c268870d7 100644 --- a/waku/waku_api/rest/relay/handlers.nim +++ b/waku/waku_api/rest/relay/handlers.nim @@ -67,9 +67,7 @@ proc installRelayApiHandlers*( for pubsubTopic in newTopics: cache.pubsubSubscribe(pubsubTopic) - node.subscribe( - (kind: PubsubSub, topic: pubsubTopic), some(messageCacheHandler(cache)) - ).isOkOr: + node.subscribe((kind: PubsubSub, topic: pubsubTopic), messageCacheHandler(cache)).isOkOr: let errorMsg = "Subscribe failed:" & $error error "SUBSCRIBE failed", error = errorMsg return RestApiResponse.internalServerError(errorMsg) @@ -202,7 +200,7 @@ proc installRelayApiHandlers*( cache.contentSubscribe(contentTopic) node.subscribe( - (kind: ContentSub, topic: contentTopic), some(messageCacheHandler(cache)) + (kind: ContentSub, topic: contentTopic), messageCacheHandler(cache) ).isOkOr: let errorMsg = "Subscribe failed:" & $error error "SUBSCRIBE failed", error = errorMsg @@ -274,11 +272,16 @@ proc installRelayApiHandlers*( var message: WakuMessage = req.toWakuMessage(version = 0).valueOr: return RestApiResponse.badRequest() - let pubsubTopic = node.wakuSharding.getShard(message.contentTopic).valueOr: - let msg = "Autosharding error: " & error + if node.wakuAutoSharding.isNone(): + let msg = "Autosharding is disabled" error "publish error", err = msg return RestApiResponse.badRequest("Failed to publish. " & msg) + let pubsubTopic = node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr: + let msg = "Autosharding error: " & error + error "publish error", err = msg + return RestApiResponse.badRequest("Failed to publish. 
" & msg) + # if RLN is mounted, append the proof to the message if not node.wakuRlnRelay.isNil(): node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr: diff --git a/waku/waku_archive/common.nim b/waku/waku_archive/common.nim index 5e5b2a1a9..f26c73da9 100644 --- a/waku/waku_archive/common.nim +++ b/waku/waku_archive/common.nim @@ -44,7 +44,7 @@ type proc `$`*(err: ArchiveError): string = case err.kind of ArchiveErrorKind.DRIVER_ERROR: - "DIRVER_ERROR: " & err.cause + "DRIVER_ERROR: " & err.cause of ArchiveErrorKind.INVALID_QUERY: "INVALID_QUERY: " & err.cause of ArchiveErrorKind.UNKNOWN: diff --git a/waku/waku_archive_legacy/common.nim b/waku/waku_archive_legacy/common.nim index e068e0f0c..ee45181cb 100644 --- a/waku/waku_archive_legacy/common.nim +++ b/waku/waku_archive_legacy/common.nim @@ -78,7 +78,7 @@ type proc `$`*(err: ArchiveError): string = case err.kind of ArchiveErrorKind.DRIVER_ERROR: - "DIRVER_ERROR: " & err.cause + "DRIVER_ERROR: " & err.cause of ArchiveErrorKind.INVALID_QUERY: "INVALID_QUERY: " & err.cause of ArchiveErrorKind.UNKNOWN: diff --git a/waku/waku_core/topics/content_topic.nim b/waku/waku_core/topics/content_topic.nim index b897c4c44..5984a760b 100644 --- a/waku/waku_core/topics/content_topic.nim +++ b/waku/waku_core/topics/content_topic.nim @@ -122,6 +122,18 @@ proc parse*( "Invalid content topic structure. Expected either //// or /////" return err(ParsingError.invalidFormat(errMsg)) +proc parse*( + T: type NsContentTopic, topics: seq[ContentTopic] +): ParsingResult[seq[NsContentTopic]] = + var res: seq[NsContentTopic] = @[] + for contentTopic in topics: + let parseRes = NsContentTopic.parse(contentTopic) + if parseRes.isErr(): + let error: ParsingError = parseRes.error + return ParsingResult[seq[NsContentTopic]].err(error) + res.add(parseRes.value) + return ParsingResult[seq[NsContentTopic]].ok(res) + # Content topic compatibility converter toContentTopic*(topic: NsContentTopic): ContentTopic = diff --git a/waku/waku_core/topics/sharding.nim b/waku/waku_core/topics/sharding.nim index 4a4af4cb5..d2f652161 100644 --- a/waku/waku_core/topics/sharding.nim +++ b/waku/waku_core/topics/sharding.nim @@ -8,6 +8,7 @@ import nimcrypto, std/options, std/tables, stew/endians2, results, stew/byteutil import ./content_topic, ./pubsub_topic +# TODO: this is autosharding, not just "sharding" type Sharding* = object clusterId*: uint16 # TODO: generations could be stored in a table here @@ -50,48 +51,32 @@ proc getShard*(s: Sharding, topic: ContentTopic): Result[RelayShard, string] = ok(shard) -proc parseSharding*( - s: Sharding, - pubsubTopic: Option[PubsubTopic], - contentTopics: ContentTopic | seq[ContentTopic], +proc getShardsFromContentTopics*( + s: Sharding, contentTopics: ContentTopic | seq[ContentTopic] ): Result[Table[RelayShard, seq[NsContentTopic]], string] = - var topics: seq[ContentTopic] - when contentTopics is seq[ContentTopic]: - topics = contentTopics - else: - topics = @[contentTopics] + let topics = + when contentTopics is seq[ContentTopic]: + contentTopics + else: + @[contentTopics] + + let parseRes = NsContentTopic.parse(topics) + let nsContentTopics = + if parseRes.isErr(): + return err("Cannot parse content topic: " & $parseRes.error) + else: + parseRes.get() var topicMap = initTable[RelayShard, seq[NsContentTopic]]() - for contentTopic in topics: - let parseRes = NsContentTopic.parse(contentTopic) + for content in nsContentTopics: + let shard = s.getShard(content).valueOr: + return err("Cannot deduce shard from content topic: " & 
$error) - let content = - if parseRes.isErr(): - return err("Cannot parse content topic: " & $parseRes.error) - else: - parseRes.get() - - let pubsub = - if pubsubTopic.isSome(): - let parseRes = RelayShard.parse(pubsubTopic.get()) - - if parseRes.isErr(): - return err("Cannot parse pubsub topic: " & $parseRes.error) - else: - parseRes.get() - else: - let shardsRes = s.getShard(content) - - if shardsRes.isErr(): - return err("Cannot autoshard content topic: " & $shardsRes.error) - else: - shardsRes.get() - - if not topicMap.hasKey(pubsub): - topicMap[pubsub] = @[] + if not topicMap.hasKey(shard): + topicMap[shard] = @[] try: - topicMap[pubsub].add(content) + topicMap[shard].add(content) except CatchableError: return err(getCurrentExceptionMsg()) diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim index 2007371c7..2ad275a94 100644 --- a/waku/waku_filter_v2/client.nim +++ b/waku/waku_filter_v2/client.nim @@ -174,7 +174,7 @@ proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) = wfc.pushHandlers.add(handler) proc initProtocolHandler(wfc: WakuFilterClient) = - proc handler(conn: Connection, proto: string) {.async.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = ## Notice that the client component is acting as a server of WakuFilterPushCodec messages while not conn.atEof(): var buf: seq[byte] diff --git a/waku/waku_filter_v2/protocol.nim b/waku/waku_filter_v2/protocol.nim index c3a4683f7..80f60fdd3 100644 --- a/waku/waku_filter_v2/protocol.nim +++ b/waku/waku_filter_v2/protocol.nim @@ -287,14 +287,20 @@ proc handleMessage*( waku_filter_handle_message_duration_seconds.observe(handleMessageDurationSec) proc initProtocolHandler(wf: WakuFilter) = - proc handler(conn: Connection, proto: string) {.async.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = debug "filter subscribe request handler triggered", peerId = shortLog(conn.peerId), conn var response: FilterSubscribeResponse wf.peerRequestRateLimiter.checkUsageLimit(WakuFilterSubscribeCodec, conn): - let buf = await conn.readLp(int(DefaultMaxSubscribeSize)) + var buf: seq[byte] + try: + buf = await conn.readLp(int(DefaultMaxSubscribeSize)) + except LPStreamError: + error "failed to read stream in readLp", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + return waku_service_network_bytes.inc( amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"] @@ -302,14 +308,19 @@ proc initProtocolHandler(wf: WakuFilter) = let decodeRes = FilterSubscribeRequest.decode(buf) if decodeRes.isErr(): - error "Failed to decode filter subscribe request", + error "failed to decode filter subscribe request", peer_id = conn.peerId, err = decodeRes.error waku_filter_errors.inc(labelValues = [decodeRpcFailure]) return let request = decodeRes.value #TODO: toAPI() split here - response = await wf.handleSubscribeRequest(conn.peerId, request) + try: + response = await wf.handleSubscribeRequest(conn.peerId, request) + except CatchableError: + error "handleSubscribeRequest failed", + remote_peer_id = conn.peerId, err = getCurrentExceptionMsg() + return debug "sending filter subscribe response", peer_id = shortLog(conn.peerId), response = response @@ -322,7 +333,11 @@ proc initProtocolHandler(wf: WakuFilter) = statusDesc: some("filter request rejected due rate limit exceeded"), ) - await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here + try: + await conn.writeLp(response.encode().buffer) 
#TODO: toRPC() separation here
+    except LPStreamError:
+      error "failed to write stream in writeLp",
+        remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
     return
 
   wf.handler = handler
@@ -355,8 +370,16 @@ proc new*(
     peerRequestRateLimiter: PerPeerRateLimiter(setting: rateLimitSetting),
   )
 
-  proc peerEventHandler(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe.} =
-    wf.onPeerEventHandler(peerId, event)
+  proc peerEventHandler(
+      peerId: PeerId, event: PeerEvent
+  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
+    try:
+      await wf.onPeerEventHandler(peerId, event)
+    except CatchableError:
+      error "onPeerEventHandler failed",
+        remote_peer_id = shortLog(peerId),
+        event = event,
+        error = getCurrentExceptionMsg()
 
   peerManager.addExtPeerEventHandler(peerEventHandler, PeerEventKind.Left)
 
diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim
index 3cfc3fe90..4b362e6bb 100644
--- a/waku/waku_lightpush/callbacks.nim
+++ b/waku/waku_lightpush/callbacks.nim
@@ -44,10 +44,10 @@ proc getRelayPushHandler*(
   ): Future[WakuLightPushResult] {.async.} =
     # append RLN proof
     let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr:
-      return lighpushErrorResult(OUT_OF_RLN_PROOF, error)
+      return lighpushErrorResult(LightPushErrorCode.OUT_OF_RLN_PROOF, error)
 
     (await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr:
-      return lighpushErrorResult(INVALID_MESSAGE_ERROR, $error)
+      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error)
 
     let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof)
 
diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim
index 61f061cc9..e193627ac 100644
--- a/waku/waku_lightpush/client.nim
+++ b/waku/waku_lightpush/client.nim
@@ -36,7 +36,8 @@ proc sendPushRequest(
   let connection = (await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr:
     waku_lightpush_v3_errors.inc(labelValues = [dialFailure])
     return lighpushErrorResult(
-      NO_PEERS_TO_RELAY, dialFailure & ": " & $peer & " is not accessible"
+      LightPushErrorCode.NO_PEERS_TO_RELAY,
+      dialFailure & ": " & $peer & " is not accessible",
     )
 
   await connection.writeLP(req.encode().buffer)
@@ -45,9 +46,9 @@ proc sendPushRequest(
   try:
     buffer = await connection.readLp(DefaultMaxRpcSize.int)
   except LPStreamRemoteClosedError:
-    error "Failed to read responose from peer", error = getCurrentExceptionMsg()
+    error "Failed to read response from peer", error = getCurrentExceptionMsg()
     if wl.peerManager.reputationManager.isSome:
       wl.peerManager.reputationManager.get().setReputation(peer.peerId, some(false))
     return lightpushResultInternalError(
       "Failed to read response from peer: " & getCurrentExceptionMsg()
     )
@@ -60,7 +61,7 @@ proc sendPushRequest(
     return lightpushResultInternalError(decodeRpcFailure)
 
   if response.requestId != req.requestId and
-      response.statusCode != TOO_MANY_REQUESTS.uint32:
+      response.statusCode != LightPushErrorCode.TOO_MANY_REQUESTS:
     error "response failure, requestId mismatch",
       requestId = req.requestId, responseRequestId = response.requestId
     if wl.peerManager.reputationManager.isSome:
@@ -121,6 +122,8 @@ proc publishToAny*(
   info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex
 
   let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr:
     # TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side?
- return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers") + return lighpushErrorResult( + LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers" + ) return await wl.publish(some(pubSubTopic), message, eligibilityproof, peer) diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index 20c177e39..ecda5c365 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -5,7 +5,9 @@ import ../waku_core, ./rpc, ../waku_relay/protocol from ../waku_core/codecs import WakuLightPushCodec export WakuLightPushCodec +export LightPushStatusCode +<<<<<<< HEAD type LightpushStatusCode* = enum SUCCESS = uint32(200) BAD_REQUEST = uint32(400) @@ -18,6 +20,21 @@ type LightpushStatusCode* = enum SERVICE_NOT_AVAILABLE = uint32(503) OUT_OF_RLN_PROOF = uint32(504) NO_PEERS_TO_RELAY = uint32(505) +======= +const LightPushSuccessCode* = (SUCCESS: LightPushStatusCode(200)) + +const LightPushErrorCode* = ( + BAD_REQUEST: LightPushStatusCode(400), + PAYLOAD_TOO_LARGE: LightPushStatusCode(413), + INVALID_MESSAGE: LightPushStatusCode(420), + UNSUPPORTED_PUBSUB_TOPIC: LightPushStatusCode(421), + TOO_MANY_REQUESTS: LightPushStatusCode(429), + INTERNAL_SERVER_ERROR: LightPushStatusCode(500), + SERVICE_NOT_AVAILABLE: LightPushStatusCode(503), + OUT_OF_RLN_PROOF: LightPushStatusCode(504), + NO_PEERS_TO_RELAY: LightPushStatusCode(505), +) +>>>>>>> master type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]] type WakuLightPushResult* = Result[uint32, ErrorStatus] @@ -29,25 +46,25 @@ type PushMessageHandler* = proc( const TooManyRequestsMessage* = "Request rejected due to too many requests" func isSuccess*(response: LightPushResponse): bool = - return response.statusCode == LightpushStatusCode.SUCCESS.uint32 + return response.statusCode == LightPushSuccessCode.SUCCESS func toPushResult*(response: LightPushResponse): WakuLightPushResult = if isSuccess(response): return ok(response.relayPeerCount.get(0)) else: - return err((response.statusCode.LightpushStatusCode, response.statusDesc)) + return err((response.statusCode, response.statusDesc)) func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult = return ok(relayPeerCount) func lightpushResultInternalError*(msg: string): WakuLightPushResult = - return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg))) + return err((LightPushErrorCode.INTERNAL_SERVER_ERROR, some(msg))) func lightpushResultBadRequest*(msg: string): WakuLightPushResult = - return err((LightpushStatusCode.BAD_REQUEST, some(msg))) + return err((LightPushErrorCode.BAD_REQUEST, some(msg))) func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult = - return err((LightpushStatusCode.SERVICE_NOT_AVAILABLE, some(msg))) + return err((LightPushErrorCode.SERVICE_NOT_AVAILABLE, some(msg))) func lighpushErrorResult*( statusCode: LightpushStatusCode, desc: Option[string] @@ -64,24 +81,22 @@ func mapPubishingErrorToPushResult*( ): WakuLightPushResult = case publishOutcome of NoTopicSpecified: - return err( - (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Empty topic, skipping publish")) - ) + return + err((LightPushErrorCode.INVALID_MESSAGE, some("Empty topic, skipping publish"))) of DuplicateMessage: - return err( - (LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Dropping already-seen message")) - ) + return + err((LightPushErrorCode.INVALID_MESSAGE, some("Dropping already-seen message"))) of NoPeersToPublish: return err( ( - LightpushStatusCode.NO_PEERS_TO_RELAY, + 
LightPushErrorCode.NO_PEERS_TO_RELAY, some("No peers for topic, skipping publish"), ) ) of CannotGenerateMessageId: return err( ( - LightpushStatusCode.INTERNAL_SERVER_ERROR, + LightPushErrorCode.INTERNAL_SERVER_ERROR, some("Error generating message id, skipping publish"), ) ) diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index 1619b6bae..912d208b3 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -27,18 +27,66 @@ type WakuLightPush* = ref object of LPProtocol peerManager*: PeerManager pushHandler*: PushMessageHandler requestRateLimiter*: RequestRateLimiter - sharding: Sharding + autoSharding: Option[Sharding] + +proc handleRequest( + wl: WakuLightPush, peerId: PeerId, pushRequest: LightpushRequest +): Future[WakuLightPushResult] {.async.} = + let pubsubTopic = pushRequest.pubSubTopic.valueOr: + if wl.autoSharding.isNone(): + let msg = "Pubsub topic must be specified when static sharding is enabled" + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg)) + ) + + let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr: + let msg = "Invalid content-topic:" & $error + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg)) + ) + + wl.autoSharding.get().getShard(parsedTopic).valueOr: + let msg = "Auto-sharding error: " & error + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INTERNAL_SERVER_ERROR, desc: some(msg)) + ) + + # ensure checking topic will not cause error at gossipsub level + if pubsubTopic.isEmptyOrWhitespace(): + let msg = "topic must not be empty" + error "lightpush request handling error", error = msg + return + WakuLightPushResult.err((code: LightPushErrorCode.BAD_REQUEST, desc: some(msg))) + + waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"]) + + let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex() + notice "handling lightpush request", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, + peer_id = peerId, + requestId = pushRequest.requestId, + pubsubTopic = pushRequest.pubsubTopic, + msg_hash = msg_hash, + receivedTime = getNowInNanosecondTime() + + let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr: + return err((code: error.code, desc: error.desc)) + return ok(res) proc handleRequest*( wl: WakuLightPush, peerId: PeerId, buffer: seq[byte] ): Future[LightPushResponse] {.async.} = - let reqDecodeRes = LightpushRequest.decode(buffer) - var isSuccess = false - var pushResponse: LightpushResponse - - if reqDecodeRes.isErr(): - pushResponse = LightpushResponse( + let pushRequest = LightPushRequest.decode(buffer).valueOr: + let desc = decodeRpcFailure & ": " & $error + error "failed to push message", error = desc + let errorCode = LightPushErrorCode.BAD_REQUEST + waku_lightpush_v3_errors.inc(labelValues = [$errorCode]) + return LightPushResponse( requestId: "N/A", # due to decode failure we don't know requestId +<<<<<<< HEAD statusCode: LightpushStatusCode.BAD_REQUEST.uint32, statusDesc: some(decodeRpcFailure & ": " & $reqDecodeRes.error), ) @@ -148,47 +196,70 @@ proc handleRequest*( some(handleRes.get()) else: none[uint32](), +======= + statusCode: errorCode, + statusDesc: some(desc), +>>>>>>> master ) - if not isSuccess: - waku_lightpush_v3_errors.inc( - 
labelValues = [pushResponse.statusDesc.valueOr("unknown")] + let relayPeerCount = (await handleRequest(wl, peerId, pushRequest)).valueOr: + let desc = error.desc + waku_lightpush_v3_errors.inc(labelValues = [$error.code]) + error "failed to push message", error = desc + return LightPushResponse( + requestId: pushRequest.requestId, statusCode: error.code, statusDesc: desc ) - error "failed to push message", error = pushResponse.statusDesc - return pushResponse + + return LightPushResponse( + requestId: pushRequest.requestId, + statusCode: LightPushSuccessCode.SUCCESS, + statusDesc: none[string](), + relayPeerCount: some(relayPeerCount), + ) proc initProtocolHandler(wl: WakuLightPush) = - proc handle(conn: Connection, proto: string) {.async.} = - var rpc: LightpushResponse + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + var rpc: LightPushResponse wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn): - let buffer = await conn.readLp(DefaultMaxRpcSize) + var buffer: seq[byte] + try: + buffer = await conn.readLp(DefaultMaxRpcSize) + except LPStreamError: + error "lightpush read stream failed", error = getCurrentExceptionMsg() + return waku_service_network_bytes.inc( amount = buffer.len().int64, labelValues = [WakuLightPushCodec, "in"] ) - rpc = await handleRequest(wl, conn.peerId, buffer) + try: + rpc = await handleRequest(wl, conn.peerId, buffer) + except CatchableError: + error "lightpush failed handleRequest", error = getCurrentExceptionMsg() do: debug "lightpush request rejected due rate limit exceeded", peerId = conn.peerId, limit = $wl.requestRateLimiter.setting rpc = static( - LightpushResponse( + LightPushResponse( ## We will not copy and decode RPC buffer from stream only for requestId ## in reject case as it is comparably too expensive and opens possible ## attack surface requestId: "N/A", - statusCode: LightpushStatusCode.TOO_MANY_REQUESTS.uint32, + statusCode: LightPushErrorCode.TOO_MANY_REQUESTS, statusDesc: some(TooManyRequestsMessage), ) ) - await conn.writeLp(rpc.encode().buffer) + try: + await conn.writeLp(rpc.encode().buffer) + except LPStreamError: + error "lightpush write stream failed", error = getCurrentExceptionMsg() - ## For lightpush might not worth to measure outgoing trafic as it is only - ## small respones about success/failure + ## For lightpush might not worth to measure outgoing traffic as it is only + ## small response about success/failure - wl.handler = handle + wl.handler = handler wl.codec = WakuLightPushCodec proc new*( @@ -196,7 +267,7 @@ proc new*( peerManager: PeerManager, rng: ref rand.HmacDrbgContext, pushHandler: PushMessageHandler, - sharding: Sharding, + autoSharding: Option[Sharding], rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), ): T = let wl = WakuLightPush( @@ -204,7 +275,7 @@ proc new*( peerManager: peerManager, pushHandler: pushHandler, requestRateLimiter: newRequestRateLimiter(rateLimitSetting), - sharding: sharding, + autoSharding: autoSharding, ) wl.initProtocolHandler() setServiceLimitMetric(WakuLightpushCodec, rateLimitSetting) diff --git a/waku/waku_lightpush/rpc.nim b/waku/waku_lightpush/rpc.nim index 929752182..3e370e25d 100644 --- a/waku/waku_lightpush/rpc.nim +++ b/waku/waku_lightpush/rpc.nim @@ -4,6 +4,10 @@ import std/options import ../waku_core import ../incentivization/rpc +type LightPushStatusCode* = distinct uint32 +proc `==`*(a, b: LightPushStatusCode): bool {.borrow.} +proc `$`*(code: LightPushStatusCode): string {.borrow.} + type LightpushRequest* = 
object requestId*: string @@ -13,6 +17,6 @@ type LightPushResponse* = object requestId*: string - statusCode*: uint32 + statusCode*: LightPushStatusCode statusDesc*: Option[string] relayPeerCount*: Option[uint32] diff --git a/waku/waku_lightpush/rpc_codec.nim b/waku/waku_lightpush/rpc_codec.nim index b9faac61c..05c7fdfd2 100644 --- a/waku/waku_lightpush/rpc_codec.nim +++ b/waku/waku_lightpush/rpc_codec.nim @@ -54,7 +54,7 @@ proc encode*(rpc: LightPushResponse): ProtoBuffer = var pb = initProtoBuffer() pb.write3(1, rpc.requestId) - pb.write3(10, rpc.statusCode) + pb.write3(10, rpc.statusCode.uint32) pb.write3(11, rpc.statusDesc) pb.write3(12, rpc.relayPeerCount) pb.finish3() @@ -75,7 +75,7 @@ proc decode*(T: type LightPushResponse, buffer: seq[byte]): ProtobufResult[T] = if not ?pb.getField(10, statusCode): return err(ProtobufError.missingRequiredField("status_code")) else: - rpc.statusCode = statusCode + rpc.statusCode = statusCode.LightPushStatusCode var statusDesc: string if not ?pb.getField(11, statusDesc): diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim index 5de25ead9..75bededaf 100644 --- a/waku/waku_lightpush_legacy/protocol.nim +++ b/waku/waku_lightpush_legacy/protocol.nim @@ -45,7 +45,8 @@ proc handleRequest*( let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex() waku_lightpush_messages.inc(labelValues = ["PushRequest"]) - notice "handling lightpush request", + notice "handling legacy lightpush request", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, peer_id = peerId, requestId = requestId, pubsubTopic = pubsubTopic, @@ -64,16 +65,24 @@ proc handleRequest*( return rpc proc initProtocolHandler(wl: WakuLegacyLightPush) = - proc handle(conn: Connection, proto: string) {.async.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = var rpc: PushRPC wl.requestRateLimiter.checkUsageLimit(WakuLegacyLightPushCodec, conn): - let buffer = await conn.readLp(DefaultMaxRpcSize) + var buffer: seq[byte] + try: + buffer = await conn.readLp(DefaultMaxRpcSize) + except LPStreamError: + error "lightpush legacy read stream failed", error = getCurrentExceptionMsg() + return waku_service_network_bytes.inc( amount = buffer.len().int64, labelValues = [WakuLegacyLightPushCodec, "in"] ) - rpc = await handleRequest(wl, conn.peerId, buffer) + try: + rpc = await handleRequest(wl, conn.peerId, buffer) + except CatchableError: + error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg() do: debug "lightpush request rejected due rate limit exceeded", peerId = conn.peerId, limit = $wl.requestRateLimiter.setting @@ -89,12 +98,15 @@ proc initProtocolHandler(wl: WakuLegacyLightPush) = ) ) - await conn.writeLp(rpc.encode().buffer) + try: + await conn.writeLp(rpc.encode().buffer) + except LPStreamError: + error "lightpush legacy write stream failed", error = getCurrentExceptionMsg() ## For lightpush might not worth to measure outgoing trafic as it is only ## small respones about success/failure - wl.handler = handle + wl.handler = handler wl.codec = WakuLegacyLightPushCodec proc new*( diff --git a/waku/waku_metadata/protocol.nim b/waku/waku_metadata/protocol.nim index 8e4640ce7..75f021dbe 100644 --- a/waku/waku_metadata/protocol.nim +++ b/waku/waku_metadata/protocol.nim @@ -29,7 +29,7 @@ proc respond( m: WakuMetadata, conn: Connection ): Future[Result[void, string]] {.async, gcsafe.} = let response = - WakuMetadataResponse(clusterId: some(m.clusterId), shards: toSeq(m.shards)) + 
WakuMetadataResponse(clusterId: some(m.clusterId.uint32), shards: toSeq(m.shards)) let res = catch: await conn.writeLP(response.encode().buffer) @@ -70,7 +70,11 @@ proc request*( return ok(response) proc initProtocolHandler(m: WakuMetadata) = - proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + defer: + # close, no data is expected + await conn.closeWithEof() + let res = catch: await conn.readLp(RpcResponseMaxBytes) let buffer = res.valueOr: @@ -88,12 +92,13 @@ proc initProtocolHandler(m: WakuMetadata) = localShards = m.shards, peer = conn.peerId - discard await m.respond(conn) + try: + discard await m.respond(conn) + except CatchableError: + error "Failed to respond to WakuMetadata request", + error = getCurrentExceptionMsg() - # close, no data is expected - await conn.closeWithEof() - - m.handler = handle + m.handler = handler m.codec = WakuMetadataCodec proc new*( diff --git a/waku/waku_peer_exchange/protocol.nim b/waku/waku_peer_exchange/protocol.nim index 2732cb1c1..14de77c67 100644 --- a/waku/waku_peer_exchange/protocol.nim +++ b/waku/waku_peer_exchange/protocol.nim @@ -243,7 +243,7 @@ proc updatePxEnrCache(wpx: WakuPeerExchange) {.async.} = wpx.populateEnrCache() proc initProtocolHandler(wpx: WakuPeerExchange) = - proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = var buffer: seq[byte] wpx.requestRateLimiter.checkUsageLimit(WakuPeerExchangeCodec, conn): try: @@ -253,9 +253,13 @@ proc initProtocolHandler(wpx: WakuPeerExchange) = waku_px_errors.inc(labelValues = [exc.msg]) ( - await wpx.respondError( - PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn - ) + try: + await wpx.respondError( + PeerExchangeResponseStatusCode.BAD_REQUEST, some(exc.msg), conn + ) + except CatchableError: + error "could not send error response", error = getCurrentExceptionMsg() + return ).isOkOr: error "Failed to respond with BAD_REQUEST:", error = $error return @@ -266,26 +270,41 @@ proc initProtocolHandler(wpx: WakuPeerExchange) = error "Failed to decode PeerExchange request", error = $decBuf.error ( - await wpx.respondError( - PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn - ) + try: + await wpx.respondError( + PeerExchangeResponseStatusCode.BAD_REQUEST, some($decBuf.error), conn + ) + except CatchableError: + error "could not send error response decode", + error = getCurrentExceptionMsg() + return ).isOkOr: error "Failed to respond with BAD_REQUEST:", error = $error return let enrs = wpx.getEnrsFromCache(decBuf.get().request.numPeers) debug "peer exchange request received", enrs = $enrs - (await wpx.respond(enrs, conn)).isErrOr: - waku_px_peers_sent.inc(enrs.len().int64()) + + try: + (await wpx.respond(enrs, conn)).isErrOr: + waku_px_peers_sent.inc(enrs.len().int64()) + except CatchableError: + error "could not send response", error = getCurrentExceptionMsg() do: - ( - await wpx.respondError( - PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn - ) - ).isOkOr: - error "Failed to respond with TOO_MANY_REQUESTS:", error = $error - # close, no data is expected - await conn.closeWithEof() + defer: + # close, no data is expected + await conn.closeWithEof() + + try: + ( + await wpx.respondError( + PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS, none(string), conn + ) + ).isOkOr: + error "Failed to respond with TOO_MANY_REQUESTS:", 
error = $error + except CatchableError: + error "could not send error response", error = getCurrentExceptionMsg() + return wpx.handler = handler wpx.codec = WakuPeerExchangeCodec diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index daaf056b7..18d60dcef 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -131,6 +131,8 @@ type # a map of validators to error messages to return when validation fails topicValidator: Table[PubsubTopic, ValidatorHandler] # map topic with its assigned validator within pubsub + topicHandlers: Table[PubsubTopic, TopicHandler] + # map topic with the TopicHandler proc in charge of attending topic's incoming message events publishObservers: seq[PublishObserver] topicsHealth*: Table[string, TopicHealth] onTopicHealthChange*: TopicHealthChangeHandler @@ -144,7 +146,7 @@ type PublishOutcome* {.pure.} = enum CannotGenerateMessageId proc initProtocolHandler(w: WakuRelay) = - proc handler(conn: Connection, proto: string) {.async.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = ## main protocol handler that gets triggered on every ## connection for a protocol string ## e.g. ``/wakusub/0.0.1``, etc... @@ -330,6 +332,13 @@ proc getPubSubPeersInMesh*( ## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic. ## The 'mesh' atribute is defined in the GossipSub ref object. + # If pubsubTopic is empty, we return all peers in mesh for any pubsub topic + if pubsubTopic == "": + var allPeers = initHashSet[PubSubPeer]() + for topic, topicMesh in w.mesh.pairs: + allPeers = allPeers.union(topicMesh) + return ok(allPeers) + if not w.mesh.hasKey(pubsubTopic): debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic", pubsubTopic = pubsubTopic @@ -346,7 +355,7 @@ proc getPubSubPeersInMesh*( return ok(peers) proc getPeersInMesh*( - w: WakuRelay, pubsubTopic: PubsubTopic + w: WakuRelay, pubsubTopic: PubsubTopic = "" ): Result[seq[PeerId], string] = ## Returns the list of peerIds in a mesh defined by the passed pubsub topic. ## The 'mesh' atribute is defined in the GossipSub ref object. 
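Note on the empty-topic behaviour introduced in getPubSubPeersInMesh above: when no pubsub topic is given, the result is the union of every per-topic mesh rather than the mesh of a single shard. The snippet below is a minimal, self-contained Nim sketch of that aggregation only; MockMesh and peersInMesh are hypothetical stand-ins (plain strings and a Table instead of nwaku's PubSubPeer and the GossipSub mesh field), not nwaku APIs.

import std/[sets, tables]

# Hypothetical stand-in for GossipSub's per-topic mesh: topic -> set of peer ids.
type MockMesh = Table[string, HashSet[string]]

proc peersInMesh(mesh: MockMesh, pubsubTopic = ""): HashSet[string] =
  ## An empty topic means "all meshes": return the union of the per-topic sets.
  if pubsubTopic == "":
    result = initHashSet[string]()
    for topicMesh in mesh.values:
      result = result.union(topicMesh)
  else:
    result = mesh.getOrDefault(pubsubTopic, initHashSet[string]())

when isMainModule:
  var mesh: MockMesh
  mesh["/waku/2/rs/1/0"] = toHashSet(["peerA", "peerB"])
  mesh["/waku/2/rs/1/1"] = toHashSet(["peerB", "peerC"])
  doAssert peersInMesh(mesh).len == 3 # union across both shards
  doAssert peersInMesh(mesh, "/waku/2/rs/1/0").len == 2

In the actual change the same idea is applied to GossipSub's mesh table, with getPeersInMesh defaulting its pubsubTopic parameter to the empty string.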
@@ -488,13 +497,11 @@ proc validateMessage*( return ok() -proc subscribe*( - w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler -): TopicHandler = +proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler) = debug "subscribe", pubsubTopic = pubsubTopic # We need to wrap the handler since gossipsub doesnt understand WakuMessage - let wrappedHandler = proc( + let topicHandler = proc( pubsubTopic: string, data: seq[byte] ): Future[void] {.gcsafe, raises: [].} = let decMsg = WakuMessage.decode(data) @@ -526,9 +533,9 @@ proc subscribe*( w.topicParams[pubsubTopic] = TopicParameters # subscribe to the topic with our wrapped handler - procCall GossipSub(w).subscribe(pubsubTopic, wrappedHandler) + procCall GossipSub(w).subscribe(pubsubTopic, topicHandler) - return wrappedHandler + w.topicHandlers[pubsubTopic] = topicHandler proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = ## Unsubscribe all handlers on this pubsub topic @@ -537,35 +544,32 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = procCall GossipSub(w).unsubscribeAll(pubsubTopic) w.topicValidator.del(pubsubTopic) + w.topicHandlers.del(pubsubTopic) proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) = if not w.topicValidator.hasKey(pubsubTopic): error "unsubscribe no validator for this topic", pubsubTopic return - if pubsubtopic notin Pubsub(w).topics: + if not w.topicHandlers.hasKey(pubsubTopic): error "not subscribed to the given topic", pubsubTopic return - var topicHandlerSeq: seq[TopicHandler] + var topicHandler: TopicHandler var topicValidator: ValidatorHandler try: - topicHandlerSeq = Pubsub(w).topics[pubsubTopic] - if topicHandlerSeq.len == 0: - error "unsubscribe no handler for this topic", pubsubTopic - return + topicHandler = w.topicHandlers[pubsubTopic] topicValidator = w.topicValidator[pubsubTopic] except KeyError: error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg() return - let topicHandler = topicHandlerSeq[0] - debug "unsubscribe", pubsubTopic - procCall GossipSub(w).unsubscribe($pubsubTopic, topicHandler) - ## TODO: uncomment the following line when https://github.com/vacp2p/nim-libp2p/pull/1356 - ## is available in a nim-libp2p release. 
- # procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator) + procCall GossipSub(w).unsubscribe(pubsubTopic, topicHandler) + procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator) + + w.topicValidator.del(pubsubTopic) + w.topicHandlers.del(pubsubTopic) proc publish*( w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage diff --git a/waku/waku_rendezvous/protocol.nim b/waku/waku_rendezvous/protocol.nim index 9f1aa69cb..a26eaca6f 100644 --- a/waku/waku_rendezvous/protocol.nim +++ b/waku/waku_rendezvous/protocol.nim @@ -123,7 +123,7 @@ proc batchRequest*( conn let reqCatch = catch: - await self.rendezvous.request(namespace, count, peers) + await self.rendezvous.request(Opt.some(namespace), count, peers) for conn in conns: await conn.close() diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index 600291ecf..4f2fb5228 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -30,23 +30,29 @@ logScope: # using the when predicate does not work within the contract macro, hence need to dupe contract(WakuRlnContract): # this serves as an entrypoint into the rln membership set - proc register(idCommitment: UInt256, userMessageLimit: UInt32) + proc register( + idCommitment: UInt256, userMessageLimit: UInt32, idCommitmentsToErase: seq[UInt256] + ) + # Initializes the implementation contract (only used in unit tests) proc initialize(maxMessageLimit: UInt256) - # this event is raised when a new member is registered - proc MemberRegistered(rateCommitment: UInt256, index: UInt32) {.event.} + # this event is emitted when a new member is registered + proc MembershipRegistered( + idCommitment: UInt256, membershipRateLimit: UInt256, index: UInt32 + ) {.event.} + # this function denotes existence of a given user - proc memberExists(idCommitment: UInt256): UInt256 {.view.} + proc isInMembershipSet(idCommitment: Uint256): bool {.view.} # this constant describes the next index of a new member - proc commitmentIndex(): UInt256 {.view.} + proc nextFreeIndex(): UInt256 {.view.} # this constant describes the block number this contract was deployed on proc deployedBlockNumber(): UInt256 {.view.} # this constant describes max message limit of rln contract - proc MAX_MESSAGE_LIMIT(): UInt256 {.view.} - # this function returns the merkleProof for a given index - # proc merkleProofElements(index: UInt40): seq[byte] {.view.} - # this function returns the merkle root - proc root(): UInt256 {.view.} + proc maxMembershipRateLimit(): UInt256 {.view.} + # this function returns the merkleProof for a given index + # proc getMerkleProof(index: EthereumUInt40): seq[array[32, byte]] {.view.} + # this function returns the Merkle root + proc root(): Uint256 {.view.} type WakuRlnContractWithSender = Sender[WakuRlnContract] @@ -57,7 +63,7 @@ type ethRpc*: Option[Web3] wakuRlnContract*: Option[WakuRlnContractWithSender] registrationTxHash*: Option[TxHash] - chainId*: uint + chainId*: UInt256 keystorePath*: Option[string] keystorePassword*: Option[string] registrationHandler*: Option[RegistrationHandler] @@ -67,11 +73,7 @@ type proc setMetadata*( g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber) ): GroupManagerResult[void] = - let normalizedBlock = - if lastProcessedBlock.isSome(): - lastProcessedBlock.get() - else: - g.latestProcessedBlock + let normalizedBlock = lastProcessedBlock.get(g.latestProcessedBlock) try: let 
metadataSetRes = g.rlnInstance.setMetadata( RlnMetadata( @@ -87,14 +89,68 @@ proc setMetadata*( return err("failed to persist rln metadata: " & getCurrentExceptionMsg()) return ok() +proc sendEthCallWithChainId( + ethRpc: Web3, + functionSignature: string, + fromAddress: Address, + toAddress: Address, + chainId: UInt256, +): Future[Result[UInt256, string]] {.async.} = + ## Workaround for web3 chainId=null issue on some networks (e.g., linea-sepolia) + ## Makes contract calls with explicit chainId for view functions with no parameters + let functionHash = + keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1)) + let functionSelector = functionHash.data[0 .. 3] + let dataSignature = "0x" & functionSelector.mapIt(it.toHex(2)).join("") + + var tx: TransactionArgs + tx.`from` = Opt.some(fromAddress) + tx.to = Opt.some(toAddress) + tx.value = Opt.some(0.u256) + tx.data = Opt.some(byteutils.hexToSeqByte(dataSignature)) + tx.chainId = Opt.some(chainId) + + let resultBytes = await ethRpc.provider.eth_call(tx, "latest") + if resultBytes.len == 0: + return err("No result returned for function call: " & functionSignature) + return ok(UInt256.fromBytesBE(resultBytes)) + +proc sendEthCallWithParams( + ethRpc: Web3, + functionSignature: string, + params: seq[byte], + fromAddress: Address, + toAddress: Address, + chainId: UInt256, +): Future[Result[seq[byte], string]] {.async.} = + ## Workaround for web3 chainId=null issue with parameterized contract calls + let functionHash = + keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1)) + let functionSelector = functionHash.data[0 .. 3] + let callData = functionSelector & params + + var tx: TransactionArgs + tx.`from` = Opt.some(fromAddress) + tx.to = Opt.some(toAddress) + tx.value = Opt.some(0.u256) + tx.data = Opt.some(callData) + tx.chainId = Opt.some(chainId) + + let resultBytes = await ethRpc.provider.eth_call(tx, "latest") + return ok(resultBytes) + proc fetchMerkleProofElements*( g: OnchainGroupManager ): Future[Result[seq[byte], string]] {.async.} = try: + # let merkleRootInvocation = g.wakuRlnContract.get().root() + # let merkleRoot = await merkleRootInvocation.call() + # The above code is not working with the latest web3 version due to chainId being null (specifically on linea-sepolia) + # TODO: find better solution than this custom sendEthCallWithChainId call let membershipIndex = g.membershipIndex.get() let index40 = stuint(membershipIndex, 40) - let methodSig = "merkleProofElements(uint40)" + let methodSig = "getMerkleProof(uint40)" let methodIdDigest = keccak.keccak256.digest(methodSig) let methodId = methodIdDigest.data[0 .. 
3] @@ -111,6 +167,7 @@ proc fetchMerkleProofElements*( var tx: TransactionArgs tx.to = Opt.some(fromHex(Address, g.ethContractAddress)) tx.data = Opt.some(callData) + tx.chainId = Opt.some(g.chainId) # Explicitly set the chain ID let responseBytes = await g.ethRpc.get().provider.eth_call(tx, "latest") @@ -123,8 +180,17 @@ proc fetchMerkleRoot*( g: OnchainGroupManager ): Future[Result[UInt256, string]] {.async.} = try: - let merkleRootInvocation = g.wakuRlnContract.get().root() - let merkleRoot = await merkleRootInvocation.call() + let merkleRoot = ( + await sendEthCallWithChainId( + ethRpc = g.ethRpc.get(), + functionSignature = "root()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + ).valueOr: + error "Failed to fetch Merkle root", error = $error + return err("Failed to fetch merkle root: " & $error) return ok(merkleRoot) except CatchableError: error "Failed to fetch Merkle root", error = getCurrentExceptionMsg() @@ -151,6 +217,7 @@ proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} = return false let merkleRoot = UInt256ToField(rootRes.get()) + if g.validRoots.len == 0: g.validRoots.addLast(merkleRoot) return true @@ -183,8 +250,26 @@ proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError error "Failed to fetch Merkle proof", error = proofResult.error g.merkleProofCache = proofResult.get() - # also need update registerd membership - let memberCount = cast[int64](await wakuRlnContract.commitmentIndex().call()) + # also need to update registered membership + # g.rlnRelayMaxMessageLimit = + # cast[uint64](await wakuRlnContract.nextFreeIndex().call()) + # The above code is not working with the latest web3 version due to chainId being null (specifically on linea-sepolia) + # TODO: find better solution than this custom sendEthCallWithChainId call + let nextFreeIndex = await sendEthCallWithChainId( + ethRpc = ethRpc, + functionSignature = "nextFreeIndex()", + fromAddress = ethRpc.defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + + if nextFreeIndex.isErr(): + error "Failed to fetch next free index", error = nextFreeIndex.error + raise newException( + CatchableError, "Failed to fetch next free index: " & nextFreeIndex.error + ) + + let memberCount = cast[int64](nextFreeIndex.get()) waku_rln_number_registered_memberships.set(float64(memberCount)) await sleepAsync(rpcDelay) @@ -219,15 +304,19 @@ method register*( var gasPrice: int g.retryWrapper(gasPrice, "Failed to get gas price"): int(await ethRpc.provider.eth_gasPrice()) * 2 + let idCommitmentHex = identityCredential.idCommitment.inHex() + debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex let idCommitment = identityCredential.idCommitment.toUInt256() - + let idCommitmentsToErase: seq[UInt256] = @[] debug "registering the member", - idCommitment = idCommitment, userMessageLimit = userMessageLimit + idCommitment = idCommitment, + userMessageLimit = userMessageLimit, + idCommitmentsToErase = idCommitmentsToErase var txHash: TxHash g.retryWrapper(txHash, "Failed to register the member"): - await wakuRlnContract.register(idCommitment, userMessageLimit.stuint(32)).send( - gasPrice = gasPrice - ) + await wakuRlnContract + .register(idCommitment, userMessageLimit.stuint(32), idCommitmentsToErase) + .send(gasPrice = gasPrice) # wait for the transaction to be mined var tsReceipt: ReceiptObject @@ -239,24 +328,30 @@ method register*( # TODO: make this robust. 
search within the event list for the event debug "ts receipt", receipt = tsReceipt[] - if tsReceipt.status.isNone() or tsReceipt.status.get() != 1.Quantity: - raise newException(ValueError, "register: transaction failed") + if tsReceipt.status.isNone(): + raise newException(ValueError, "Transaction failed: status is None") + if tsReceipt.status.get() != 1.Quantity: + raise newException( + ValueError, "Transaction failed with status: " & $tsReceipt.status.get() + ) - let firstTopic = tsReceipt.logs[0].topics[0] - # the hash of the signature of MemberRegistered(uint256,uint32) event is equal to the following hex value - if firstTopic != - cast[FixedBytes[32]](keccak.keccak256.digest("MemberRegistered(uint256,uint32)").data): + ## Extract MembershipRegistered event from transaction logs (third event) + let thirdTopic = tsReceipt.logs[2].topics[0] + debug "third topic", thirdTopic = thirdTopic + if thirdTopic != + cast[FixedBytes[32]](keccak.keccak256.digest( + "MembershipRegistered(uint256,uint256,uint32)" + ).data): raise newException(ValueError, "register: unexpected event signature") - # the arguments of the raised event i.e., MemberRegistered are encoded inside the data field - # data = rateCommitment encoded as 256 bits || index encoded as 32 bits - let arguments = tsReceipt.logs[0].data + ## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32) + let arguments = tsReceipt.logs[2].data debug "tx log data", arguments = arguments let - # In TX log data, uints are encoded in big endian - membershipIndex = UInt256.fromBytesBE(arguments[32 ..^ 1]) + ## Extract membership index from transaction log data (big endian) + membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95]) - debug "parsed membershipIndex", membershipIndex + trace "parsed membershipIndex", membershipIndex g.userMessageLimit = some(userMessageLimit) g.membershipIndex = some(membershipIndex.toMembershipIndex()) g.idCredentials = some(identityCredential) @@ -372,7 +467,7 @@ method generateProof*( var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`) let proofBytes: array[320, byte] = proofValue[] - ## parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] + ## Parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] let proofOffset = 128 rootOffset = proofOffset + 32 @@ -414,9 +509,7 @@ method generateProof*( return ok(output) method verifyProof*( - g: OnchainGroupManager, # verifier context - input: seq[byte], # raw message data (signal) - proof: RateLimitProof, # proof received from the peer + g: OnchainGroupManager, input: seq[byte], proof: RateLimitProof ): GroupManagerResult[bool] {.gcsafe, raises: [].} = ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots -- @@ -485,9 +578,9 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} let ethRpc: Web3 = (await establishConnection(g)).valueOr: return err("failed to connect to Ethereum clients: " & $error) - var fetchedChainId: uint + var fetchedChainId: UInt256 g.retryWrapper(fetchedChainId, "Failed to get the chain id"): - uint(await ethRpc.provider.eth_chainId()) + await ethRpc.provider.eth_chainId() # Set the chain id if g.chainId == 0: @@ -539,11 +632,31 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} g.membershipIndex = some(keystoreCred.treeIndex) g.userMessageLimit = some(keystoreCred.userMessageLimit) # 
now we check on the contract if the commitment actually has a membership + let idCommitmentBytes = keystoreCred.identityCredential.idCommitment + let idCommitmentUInt256 = keystoreCred.identityCredential.idCommitment.toUInt256() + let idCommitmentHex = idCommitmentBytes.inHex() + debug "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes + debug "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256 + debug "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex + let idCommitment = idCommitmentUInt256 try: - let membershipExists = await wakuRlnContract - .memberExists(keystoreCred.identityCredential.idCommitment.toUInt256()) - .call() - if membershipExists == 0: + let commitmentBytes = keystoreCred.identityCredential.idCommitment + let params = commitmentBytes.reversed() + let resultBytes = await sendEthCallWithParams( + ethRpc = g.ethRpc.get(), + functionSignature = "isInMembershipSet(uint256)", + params = params, + fromAddress = ethRpc.defaultAccount, + toAddress = contractAddress, + chainId = g.chainId, + ) + if resultBytes.isErr(): + return err("Failed to check membership: " & resultBytes.error) + let responseBytes = resultBytes.get() + let membershipExists = responseBytes.len == 32 and responseBytes[^1] == 1'u8 + + debug "membershipExists", membershipExists = membershipExists + if membershipExists == false: return err("the commitment does not have a membership") except CatchableError: return err("failed to check if the commitment has a membership") @@ -555,13 +668,23 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} warn "could not initialize with persisted rln metadata" elif metadataGetOptRes.get().isSome(): let metadata = metadataGetOptRes.get().get() - if metadata.chainId != uint(g.chainId): + if metadata.chainId != g.chainId: return err("persisted data: chain id mismatch") if metadata.contractAddress != g.ethContractAddress.toLower(): return err("persisted data: contract address mismatch") - g.rlnRelayMaxMessageLimit = - cast[uint64](await wakuRlnContract.MAX_MESSAGE_LIMIT().call()) + let maxMembershipRateLimit = ( + await sendEthCallWithChainId( + ethRpc = ethRpc, + functionSignature = "maxMembershipRateLimit()", + fromAddress = ethRpc.defaultAccount, + toAddress = contractAddress, + chainId = g.chainId, + ) + ).valueOr: + return err("Failed to fetch max membership rate limit: " & $error) + + g.rlnRelayMaxMessageLimit = cast[uint64](maxMembershipRateLimit) proc onDisconnect() {.async.} = error "Ethereum client disconnected" diff --git a/waku/waku_rln_relay/protocol_types.nim b/waku/waku_rln_relay/protocol_types.nim index c6f52e00b..867878886 100644 --- a/waku/waku_rln_relay/protocol_types.nim +++ b/waku/waku_rln_relay/protocol_types.nim @@ -131,6 +131,13 @@ proc encode*(nsp: RateLimitProof): ProtoBuffer = output.finish3() return output +func encode*(x: UInt32): seq[byte] = + ## the Ethereum ABI imposes a 32 byte width for every type + let numTargetBytes = 32 div 8 + let paddingBytes = 32 - numTargetBytes + let paddingZeros = newSeq[byte](paddingBytes) + paddingZeros & @(stint.toBytesBE(x)) + type SpamHandler* = proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].} diff --git a/waku/waku_rln_relay/rln/wrappers.nim b/waku/waku_rln_relay/rln/wrappers.nim index 24682dda6..2e10c7e37 100644 --- a/waku/waku_rln_relay/rln/wrappers.nim +++ b/waku/waku_rln_relay/rln/wrappers.nim @@ -4,6 +4,7 @@ import options, eth/keys, stew/[arrayops, byteutils, endians2], + stint, results, 
std/[sequtils, strutils, tables] @@ -410,7 +411,7 @@ proc getMerkleRoot*(rlnInstance: ptr RLN): MerkleNodeResult = type RlnMetadata* = object lastProcessedBlock*: uint64 - chainId*: uint64 + chainId*: UInt256 contractAddress*: string validRoots*: seq[MerkleNode] @@ -419,7 +420,7 @@ proc serialize(metadata: RlnMetadata): seq[byte] = ## returns the serialized metadata return concat( @(metadata.lastProcessedBlock.toBytes()), - @(metadata.chainId.toBytes()), + @(metadata.chainId.toBytes(Endianness.littleEndian)[0 .. 7]), @(hexToSeqByte(toLower(metadata.contractAddress))), @(uint64(metadata.validRoots.len()).toBytes()), @(serialize(metadata.validRoots)), @@ -427,7 +428,7 @@ proc serialize(metadata: RlnMetadata): seq[byte] = type MerkleNodeSeq = seq[MerkleNode] -proc deserialize*(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T = +proc deserialize(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T = ## deserializes a byte seq to a seq of MerkleNodes ## the order of serialization is |merkle_node_len<8>|merkle_node[len]| @@ -489,7 +490,7 @@ proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[Option[RlnMetadata]] = var lastProcessedBlock: uint64 - chainId: uint64 + chainId: UInt256 contractAddress: string validRoots: MerkleNodeSeq @@ -500,7 +501,9 @@ proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[Option[RlnMetadata]] = lastProcessedBlock = uint64.fromBytes(metadataBytes[lastProcessedBlockOffset .. chainIdOffset - 1]) - chainId = uint64.fromBytes(metadataBytes[chainIdOffset .. contractAddressOffset - 1]) + chainId = UInt256.fromBytes( + metadataBytes[chainIdOffset .. contractAddressOffset - 1], Endianness.littleEndian + ) contractAddress = byteutils.toHex(metadataBytes[contractAddressOffset .. validRootsOffset - 1]) let validRootsBytes = metadataBytes[validRootsOffset .. 
metadataBytes.high] diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index 5dae3bd51..965c8c021 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -44,7 +44,7 @@ type RlnRelayConf* = object of RootObj credIndex*: Option[uint] ethContractAddress*: string ethClientUrls*: seq[string] - chainId*: uint + chainId*: UInt256 creds*: Option[RlnRelayCreds] treePath*: string epochSizeSec*: uint64 @@ -93,6 +93,7 @@ type WakuRLNRelay* = ref object of RootObj nullifierLog*: OrderedTable[Epoch, Table[Nullifier, ProofMetadata]] lastEpoch*: Epoch # the epoch of the last published rln message rlnEpochSizeSec*: uint64 + rlnMaxTimestampGap*: uint64 rlnMaxEpochGap*: uint64 groupManager*: GroupManager onFatalErrorAction*: OnFatalErrorHandler @@ -103,6 +104,7 @@ type WakuRLNRelay* = ref object of RootObj proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch = ## gets time `t` as `flaot64` with subseconds resolution in the fractional part ## and returns its corresponding rln `Epoch` value + let e = uint64(t / rlnPeer.rlnEpochSizeSec.float64) return toEpoch(e) @@ -211,25 +213,26 @@ proc validateMessage*( # track message count for metrics waku_rln_messages_total.inc() - # checks if the `msg`'s epoch is far from the current epoch - # it corresponds to the validation of rln external nullifier - # get current rln epoch - let epoch: Epoch = rlnPeer.getCurrentEpoch() + # checks if the message's timestamp is within acceptable range + let currentTime = getTime().toUnixFloat() + let messageTime = msg.timestamp.float64 / 1e9 - let - msgEpoch = proof.epoch - # calculate the gaps - gap = absDiff(epoch, msgEpoch) + let timeDiff = uint64(abs(currentTime - messageTime)) - trace "epoch info", currentEpoch = fromEpoch(epoch), msgEpoch = fromEpoch(msgEpoch) + debug "time info", + currentTime = currentTime, messageTime = messageTime, msgHash = msg.hash - # validate the epoch - if gap > rlnPeer.rlnMaxEpochGap: - # message's epoch is too old or too ahead - # accept messages whose epoch is within +-MaxEpochGap from the current epoch - warn "invalid message: epoch gap exceeds a threshold", - gap = gap, payloadLen = msg.payload.len, msgEpoch = fromEpoch(proof.epoch) - waku_rln_invalid_messages_total.inc(labelValues = ["invalid_epoch"]) + if timeDiff > rlnPeer.rlnMaxTimestampGap: + warn "invalid message: timestamp difference exceeds threshold", + timeDiff = timeDiff, maxTimestampGap = rlnPeer.rlnMaxTimestampGap + waku_rln_invalid_messages_total.inc(labelValues = ["invalid_timestamp"]) + return MessageValidationResult.Invalid + + let computedEpoch = rlnPeer.calcEpoch(messageTime) + if proof.epoch != computedEpoch: + warn "invalid message: timestamp mismatches epoch", + proofEpoch = fromEpoch(proof.epoch), computedEpoch = fromEpoch(computedEpoch) + waku_rln_invalid_messages_total.inc(labelValues = ["timestamp_mismatch"]) return MessageValidationResult.Invalid let rootValidationRes = rlnPeer.groupManager.validateRoot(proof.merkleRoot) @@ -242,8 +245,9 @@ proc validateMessage*( # verify the proof let - contentTopicBytes = msg.contentTopic.toBytes - input = concat(msg.payload, contentTopicBytes) + contentTopicBytes = toBytes(msg.contentTopic) + timestampBytes = toBytes(msg.timestamp.uint64) + input = concat(msg.payload, contentTopicBytes, @(timestampBytes)) waku_rln_proof_verification_total.inc() waku_rln_proof_verification_duration_seconds.nanosecondTime: @@ -265,6 +269,8 @@ proc validateMessage*( if proofMetadataRes.isErr(): waku_rln_errors_total.inc(labelValues = 
["proof_metadata_extraction"]) return MessageValidationResult.Invalid + + let msgEpoch = proof.epoch let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get()) if hasDup.isErr(): waku_rln_errors_total.inc(labelValues = ["duplicate_check"]) @@ -305,10 +311,12 @@ proc validateMessageAndUpdateLog*( proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] = ## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module - ## it extracts the `contentTopic` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence + ## it extracts the `contentTopic`, `timestamp` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence + let - contentTopicBytes = wakumessage.contentTopic.toBytes() - output = concat(wakumessage.payload, contentTopicBytes) + contentTopicBytes = toBytes(wakumessage.contentTopic) + timestampBytes = toBytes(wakumessage.timestamp.uint64) + output = concat(wakumessage.payload, contentTopicBytes, @(timestampBytes)) return output proc appendRLNProof*( @@ -479,6 +487,7 @@ proc mount( nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float), rlnEpochSizeSec: conf.epochSizeSec, rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1), + rlnMaxTimestampGap: uint64(MaxClockGapSeconds), onFatalErrorAction: conf.onFatalErrorAction, ) diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim index aa22fe5cd..5e13c9a77 100644 --- a/waku/waku_store/protocol.nim +++ b/waku/waku_store/protocol.nim @@ -88,7 +88,7 @@ proc initProtocolHandler(self: WakuStore) = statusDesc: $ErrorCode.TOO_MANY_REQUESTS, ).encode().buffer - proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = var successfulQuery = false ## only consider the correct queries in metrics var resBuf: StoreResp var queryDuration: float @@ -106,7 +106,14 @@ proc initProtocolHandler(self: WakuStore) = let queryStartTime = getTime().toUnixFloat() - resBuf = await self.handleQueryRequest(conn.peerId, reqBuf) + try: + resBuf = await self.handleQueryRequest(conn.peerId, reqBuf) + except CatchableError: + error "store query failed in handler", + remote_peer_id = conn.peerId, + requestId = resBuf.requestId, + error = getCurrentExceptionMsg() + return queryDuration = getTime().toUnixFloat() - queryStartTime waku_store_time_seconds.set(queryDuration, ["query-db-time"]) diff --git a/waku/waku_store_legacy/protocol.nim b/waku/waku_store_legacy/protocol.nim index d72308e63..79d0f03a1 100644 --- a/waku/waku_store_legacy/protocol.nim +++ b/waku/waku_store_legacy/protocol.nim @@ -110,7 +110,7 @@ proc initProtocolHandler(ws: WakuStore) = ), ).encode().buffer - proc handler(conn: Connection, proto: string) {.async, closure.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = var successfulQuery = false ## only consider the correct queries in metrics var resBuf: StoreResp var queryDuration: float @@ -127,7 +127,13 @@ proc initProtocolHandler(ws: WakuStore) = ) let queryStartTime = getTime().toUnixFloat() - resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf) + try: + resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf) + except CatchableError: + error "legacy store query handler failed", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + return + queryDuration = 
getTime().toUnixFloat() - queryStartTime waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"]) successfulQuery = true diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index d9912a3df..0601d2c23 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -65,6 +65,8 @@ type SyncReconciliation* = ref object of LPProtocol proc messageIngress*( self: SyncReconciliation, pubsubTopic: PubsubTopic, msg: WakuMessage ) = + trace "message ingress", pubsub_topic = pubsubTopic, msg = msg + if msg.ephemeral: return @@ -78,6 +80,8 @@ proc messageIngress*( proc messageIngress*( self: SyncReconciliation, msgHash: WakuMessageHash, msg: WakuMessage ) = + trace "message ingress", msg_hash = msgHash.toHex(), msg = msg + if msg.ephemeral: return @@ -87,6 +91,8 @@ proc messageIngress*( error "failed to insert new message", msg_hash = msgHash.toHex(), err = error proc messageIngress*(self: SyncReconciliation, id: SyncID) = + trace "message ingress", id = id + self.storage.insert(id).isOkOr: error "failed to insert new message", msg_hash = id.hash.toHex(), err = error @@ -135,6 +141,9 @@ proc processRequest( recvPayload.shards.toPackedSet() == self.shards: sendPayload = self.storage.processPayload(recvPayload, hashToSend, hashToRecv) + trace "sync payload processed", + hash_to_send = hashToSend, hash_to_recv = hashToRecv + sendPayload.cluster = self.cluster sendPayload.shards = self.shards.toSeq() @@ -265,7 +274,7 @@ proc initFillStorage( debug "initial storage filling started" - var ids = newSeq[SyncID](DefaultStorageCap) + var ids = newSeqOfCap[SyncID](DefaultStorageCap) # we assume IDs are in order @@ -322,16 +331,18 @@ proc new*( remoteNeedsTx: remoteNeedsTx, ) - let handler = proc(conn: Connection, proto: string) {.async, closure.} = - (await sync.processRequest(conn)).isOkOr: - error "request processing error", error = error - - return + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + try: + (await sync.processRequest(conn)).isOkOr: + error "request processing error", error = error + except CatchableError: + error "exception in reconciliation handler", error = getCurrentExceptionMsg() sync.handler = handler sync.codec = WakuReconciliationCodec - info "Store Reconciliation protocol initialized" + info "Store Reconciliation protocol initialized", + sync_range = syncRange, sync_interval = syncInterval, relay_jitter = relayJitter return ok(sync) diff --git a/waku/waku_store_sync/transfer.nim b/waku/waku_store_sync/transfer.nim index c1e5d3e37..783cbffb6 100644 --- a/waku/waku_store_sync/transfer.nim +++ b/waku/waku_store_sync/transfer.nim @@ -97,7 +97,13 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} = while true: # infinite loop let (peerId, fingerprint) = await self.remoteNeedsRx.popFirst() - if not self.outSessions.hasKey(peerId): + if (not self.outSessions.hasKey(peerId)) or self.outSessions[peerId].closed() or + ## sanity check, should not be possible + self.outSessions[peerId].isClosedRemotely: + ## quite possibly remote end has closed the connection, believing transfer to be done + debug "opening transfer connection to remote peer", + my_peer_id = self.peerManager.switch.peerInfo.peerId, remote_peer_id = peerId + let connection = (await self.openConnection(peerId)).valueOr: error "failed to establish transfer connection", error = error continue @@ -121,6 +127,11 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} = let msg = 
WakuMessageAndTopic(pubsub: response.topics[0], message: response.messages[0]) + trace "sending transfer message", + my_peer_id = self.peerManager.switch.peerInfo.peerId, + remote_peer_id = peerId, + msg = msg + (await sendMessage(connection, msg)).isOkOr: self.outSessions.del(peerId) await connection.close() @@ -130,11 +141,12 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} = return proc initProtocolHandler(self: SyncTransfer) = - let handler = proc(conn: Connection, proto: string) {.async, closure.} = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = while true: - if not self.inSessions.contains(conn.peerId): + ## removed DOS protection until we can design something better + #[ if not self.inSessions.contains(conn.peerId): error "unwanted peer, disconnecting", remote = conn.peerId - break + break ]# let readRes = catch: await conn.readLp(int64(DefaultMaxWakuMessageSize)) @@ -156,10 +168,14 @@ proc initProtocolHandler(self: SyncTransfer) = let hash = computeMessageHash(pubsub, msg) - #TODO verify msg RLN proof... - - (await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr: - error "failed to archive message", error = $error + try: + #TODO verify msg RLN proof... + (await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr: + error "failed to archive message", error = $error + continue + except CatchableError: + error "syncMessageIngress failed", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() continue let id = SyncID(time: msg.timestamp, hash: hash)
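
Reviewer note (not part of the patch): the eth_call workarounds above (sendEthCallWithChainId, sendEthCallWithParams, fetchMerkleProofElements, the isInMembershipSet check) and the UInt32 encode helper in protocol_types.nim all hand-roll the same Ethereum ABI call-data layout: a 4-byte selector taken from keccak256 of the canonical function signature, followed by each static argument left-padded to a 32-byte big-endian word. The sketch below restates that layout in isolation; abiSelector, abiEncodeUint and the example argument 42 are illustrative only and not part of nwaku, and the imports assume nimcrypto, stint and the std library as used elsewhere in this diff.

# Sketch only (not part of this patch): the ABI call-data layout used by the
# eth_call helpers above. abiSelector/abiEncodeUint are illustrative names.
import std/[sequtils, strutils]
import nimcrypto/keccak
import stint

proc abiSelector(signature: string): seq[byte] =
  ## First 4 bytes of keccak256 over the canonical signature, e.g. "root()".
  let digest = keccak256.digest(signature.toOpenArrayByte(0, signature.len - 1))
  digest.data[0 .. 3]

proc abiEncodeUint(value: UInt256): seq[byte] =
  ## Every static ABI argument occupies one 32-byte, big-endian, zero-padded word.
  @(value.toBytesBE())

when isMainModule:
  # "isInMembershipSet(uint256)" with an example argument of 42:
  let callData = abiSelector("isInMembershipSet(uint256)") & abiEncodeUint(42.u256)
  echo "0x" & callData.mapIt(it.toHex(2)).join("")

The explicit padding is also why encode*(x: UInt32) prepends 28 zero bytes before the 4 big-endian value bytes, and why init() reverses the idCommitment bytes before appending them to the isInMembershipSet(uint256) selector: the contract expects the commitment as a single big-endian 32-byte word.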