From 1ace0154d3c351207ec1423361802342bf291747 Mon Sep 17 00:00:00 2001 From: Darshan <35736874+darshankabariya@users.noreply.github.com> Date: Thu, 12 Mar 2026 23:17:47 +0530 Subject: [PATCH 1/5] chore: correct dynamic library extension on mac and update OS detection (#3754) --- examples/python/waku.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/examples/python/waku.py b/examples/python/waku.py index 65eb5d750..b2303e5e3 100644 --- a/examples/python/waku.py +++ b/examples/python/waku.py @@ -1,23 +1,32 @@ -from flask import Flask import ctypes import argparse +import sys + +if sys.platform == "darwin": + _lib_ext = "dylib" +elif sys.platform == "win32": + _lib_ext = "dll" +else: + _lib_ext = "so" + +_lib_path = f"build/libwaku.{_lib_ext}" libwaku = object try: # This python script should be run from the root repo folder - libwaku = ctypes.CDLL("build/libwaku.so") -except Exception as e: - print("Exception: ", e) - print(""" -The 'libwaku.so' library can be created with the next command from + libwaku = ctypes.CDLL(_lib_path) +except OSError as e: + print(f"Exception: {e}") + print(f""" +The '{_lib_path}' library can be created with the next command from the repo's root folder: `make libwaku`. -And it should build the library in 'build/libwaku.so'. +And it should build the library in '{_lib_path}'. -Therefore, make sure the LD_LIBRARY_PATH env var points at the location that -contains the 'libwaku.so' library. +Therefore, make sure the library path env var points at the location that +contains the '{_lib_path}' library. 
""") - exit(-1) + exit(1) def handle_event(ret, msg, user_data): print("Event received: %s" % msg) From a77870782a46115161c8bd4780e79f92d0579067 Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 12 Mar 2026 19:13:09 +0100 Subject: [PATCH 2/5] Change release process (#3750) * Simplify release process and leave the DST validation for deployment process * Rename prepare_full_release.md to prepare_release.md * Remove release-process.md as it duplicates info and causes confusion --- .../ISSUE_TEMPLATE/prepare_beta_release.md | 63 ------- ...are_full_release.md => prepare_release.md} | 39 ++--- docs/contributors/release-process.md | 164 ------------------ 3 files changed, 17 insertions(+), 249 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/prepare_beta_release.md rename .github/ISSUE_TEMPLATE/{prepare_full_release.md => prepare_release.md} (71%) delete mode 100644 docs/contributors/release-process.md diff --git a/.github/ISSUE_TEMPLATE/prepare_beta_release.md b/.github/ISSUE_TEMPLATE/prepare_beta_release.md deleted file mode 100644 index 3c4e76854..000000000 --- a/.github/ISSUE_TEMPLATE/prepare_beta_release.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -name: Prepare Beta Release -about: Execute tasks for the creation and publishing of a new beta release -title: 'Prepare beta release 0.0.0' -labels: beta-release -assignees: '' - ---- - - - -### Items to complete - -All items below are to be completed by the owner of the given release. - -- [ ] Create release branch with major and minor only ( e.g. release/v0.X ) if it doesn't exist. -- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-beta-rc.0`, `v0.X.0-beta-rc.1`, ... `v0.X.0-beta-rc.N`). -- [ ] Generate and edit release notes in CHANGELOG.md. - -- [ ] **Validation of release candidate** - - [ ] **Automated testing** - - [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate. 
- - [ ] **Waku fleet testing** - - [ ] Deploy the release candidate to `waku.test` through [deploy-waku-test job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it). - - After completion, disable fleet so that daily CI does not override your release candidate. - - Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image. - - Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab). - - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`. - - Set time range to "Last 30 days" (or since last release). - - Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`. - - Document any crashes or errors found. - - [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the [deploy-waku-sandbox job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). - - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. most probably if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`. - - [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit. - -- [ ] **Proceed with release** - - - [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`. 
- - [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release. - - [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work. - - [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work. - - [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases). - - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. - -- [ ] **Promote release to fleets** - - [ ] Ask the PM lead to announce the release. - - [ ] Update infra config with any deprecated arguments or changed options. - - [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). 
- -### Links - -- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md) -- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md) -- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) -- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) -- [Jenkins](https://ci.infra.status.im/job/nim-waku/) -- [Fleets](https://fleets.waku.org/) -- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) -- [Kibana](https://kibana.infra.status.im/app/) diff --git a/.github/ISSUE_TEMPLATE/prepare_full_release.md b/.github/ISSUE_TEMPLATE/prepare_release.md similarity index 71% rename from .github/ISSUE_TEMPLATE/prepare_full_release.md rename to .github/ISSUE_TEMPLATE/prepare_release.md index 4df808bd4..83456e79a 100644 --- a/.github/ISSUE_TEMPLATE/prepare_full_release.md +++ b/.github/ISSUE_TEMPLATE/prepare_release.md @@ -1,7 +1,7 @@ --- -name: Prepare Full Release +name: Prepare Release about: Execute tasks for the creation and publishing of a new full release -title: 'Prepare full release 0.0.0' +title: 'Prepare release 0.0.0' labels: full-release assignees: '' @@ -26,6 +26,9 @@ All items below are to be completed by the owner of the given release. - [ ] **Automated testing** - [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate. + - [ ] **QA testing** + - [ ] Ask QA to run their available tests against the release candidate. + - [ ] **Waku fleet testing** - [ ] Deploy the release candidate to `waku.test` fleet. - Start the [deployment job](https://ci.infra.status.im/job/nim-waku/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it). @@ -36,40 +39,32 @@ All items below are to be completed by the owner of the given release. 
- Set time range to "Last 30 days" (or since last release). - Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`. - Document any crashes or errors found. - - [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [deployment job](https://ci.infra.status.im/job/nim-waku/). - - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. most probably if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`. + - [ ] Ask QA to perform tests against `waku.test`, if any. Then, after that, review Kibana for possible issues or unexpected restart. - [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit. - - [ ] **QA and DST testing** - - [ ] Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams. - - [ ] Vac-DST: An additional report is needed ([see this example](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f)). Inform DST team about what are the expectations for this rc. For example, if we expect higher or lower bandwidth consumption. - - - [ ] **Status fleet testing** - - [ ] Deploy release candidate to `status.staging` + - [ ] **Status testing** + - [ ] Get QA approval to deploy a new version in `status.staging`. + - [ ] Deploy release candidate to `status.staging`. - [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue. - - [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client. 
- - 1:1 Chats with each other - - Send and receive messages in a community - - Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store + - [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client mode. + - 1:1 Chats with each other. + - Send and receive messages in a community. + - Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store. - [ ] Perform checks based on _end user impact_ - [ ] Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues on their Discord server or in the [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (this is not a blocking point.) - - [ ] Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested - - [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging` + - [ ] Ask QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested + - [ ] Ask QA or infra to run the automated Status e2e tests against `status.staging` - [ ] Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC. - - [ ] **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities. - [ ] **Proceed with release** - - [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0`). 
+ - [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `git tag -as v0.X.0 -m "final release."`). - [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/logos-delivery-simulator) according to the new release. - [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work. - [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work. - [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases). - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. - -- [ ] **Promote release to fleets** - - [ ] Ask the PM lead to announce the release. - - [ ] Update infra config with any deprecated arguments or changed options. + - [ ] Create a deployment issue with the recently created release. ### Links diff --git a/docs/contributors/release-process.md b/docs/contributors/release-process.md deleted file mode 100644 index 8aa9282cd..000000000 --- a/docs/contributors/release-process.md +++ /dev/null @@ -1,164 +0,0 @@ -# Release Process - -How to do releases. - -For more context, see https://trunkbaseddevelopment.com/branch-for-release/ - -## How to do releases - -### Prerequisites - -- All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) have been closed or, after consultation, deferred to the next release. -- All submodules are up to date. - > Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). 
That PR process must be done and merged a couple of days before the release. - - > In case the submodules update has a low effort and/or risk for the release, follow the ["Update submodules"](./git-submodules.md) instructions. - - > If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate. - -### Release types - -- **Full release**: follow the entire [Release process](#release-process--step-by-step). - -- **Beta release**: skip just `6c` and `6d` steps from [Release process](#release-process--step-by-step). - -- Choose the appropriate release process based on the release type: - - [Full Release](../../.github/ISSUE_TEMPLATE/prepare_full_release.md) - - [Beta Release](../../.github/ISSUE_TEMPLATE/prepare_beta_release.md) - -### Release process ( step by step ) - -1. Checkout a release branch from master - - ``` - git checkout -b release/v0.X.0 - ``` - -2. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update. - - ``` - make release-notes - ``` - -3. Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it - - ``` - git tag -as v0.X.0-rc.0 -m "Initial release." - git push origin v0.X.0-rc.0 - ``` - - This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a GitHub release - -4. Open a PR from the release branch for others to review the included changes and the release-notes - -5. In case additional changes are needed, create a new RC tag - - Make sure the new tag is associated - with CHANGELOG update. - - ``` - # Make changes, rebase and create new tag - # Squash to one commit and make a nice commit message - git rebase -i origin/master - git tag -as v0.X.0-rc.1 -m "Initial release." 
- git push origin v0.X.0-rc.1 - ``` - - Similarly use v0.X.0-rc.2, v0.X.0-rc.3 etc. for additional RC tags. - -6. **Validation of release candidate** - - 6a. **Automated testing** - - Ensure all the unit tests (specifically js-waku tests) are green against the release candidate. - - 6b. **Waku fleet testing** - - Start job on `waku.test` [Deployment job](https://ci.infra.status.im/job/nim-waku/), wait for completion of the job. If it fails, then debug it. - - After completion, disable fleet so that daily ci not override your release candidate. - - Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image. - - Check if the image is created at [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab). - - Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`. - - Set time range to "Last 30 days" (or since last release). - - Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`. - - Document any crashes or errors found. - - If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [Deployment job](https://ci.infra.status.im/job/nim-waku/). - - Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. most probably if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`. - - Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit. - - 6c. **QA and DST testing** - - Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams. 
- - > We need an additional report like [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) specifically from the DST team. Inform DST team about what are the expectations for this rc. For example, if we expect higher or lower bandwidth consumption. - - 6d. **Status fleet testing** - - Deploy release candidate to `status.staging` - - Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue. - - Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client. - - 1:1 Chats with each other - - Send and receive messages in a community - - Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store - - Perform checks based on _end-user impact_. - - Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app) (not a blocking point). - - Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested. - - Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`. - - Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC. - - **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities. - -7. Once the release-candidate has been validated, create a final release tag and push it. -We also need to merge the release branch back into master as a final step. - - ``` - git checkout release/v0.X.0 - git tag -as v0.X.0 -m "final release." 
(use v0.X.0-beta as the tag if you are creating a beta release) - git push origin v0.X.0 - git switch master - git pull - git merge release/v0.X.0 - ``` -8. Update `waku-rust-bindings`, `waku-simulator` and `nwaku-compose` to use the new release. - -9. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag. - - * Add binaries produced by the ["Upload Release Asset"](https://github.com/waku-org/nwaku/actions/workflows/release-assets.yml) workflow. Where possible, test the binaries before uploading to the release. - -### After the release - -1. Announce the release on Twitter, Discord and other channels. -2. Deploy the release image to [Dockerhub](https://hub.docker.com/r/wakuorg/nwaku) by triggering [the manual Jenkins deployment job](https://ci.infra.status.im/job/nim-waku/job/docker-manual/). - > Ensure the following build parameters are set: - > - `MAKE_TARGET`: `wakunode2` - > - `IMAGE_TAG`: the release tag (e.g. `v0.38.0`) - > - `IMAGE_NAME`: `wakuorg/nwaku` - > - `NIMFLAGS`: `--colors:off -d:disableMarchNative -d:chronicles_colors:none -d:postgres` - > - `GIT_REF` the release tag (e.g. `v0.38.0`) - -### Performing a patch release - -1. Cherry-pick the relevant commits from master to the release branch - - ``` - git cherry-pick - ``` - -2. Create a release-candidate tag with the same name as release and `-rc.N` suffix - -3. Update `CHANGELOG.md`. From the release branch, use the helper Make target after having cherry-picked the commits. - - ``` - make release-notes - ``` - Create a new branch and raise a PR with the changelog updates to master. - -4. Once the release-candidate has been validated and changelog PR got merged, cherry-pick the changelog update from master to the release branch. Create a final release tag and push it. - -5. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual. 
- -### Links - -- [Release process](https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md) -- [Release notes](https://github.com/waku-org/nwaku/blob/master/CHANGELOG.md) -- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) -- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) -- [Jenkins](https://ci.infra.status.im/job/nim-waku/) -- [Fleets](https://fleets.waku.org/) -- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) -- [Kibana](https://kibana.infra.status.im/app/) \ No newline at end of file From 03249df715aa04dbe689649145ae7e059eda8e6d Mon Sep 17 00:00:00 2001 From: Ivan FB <128452529+Ivansete-status@users.noreply.github.com> Date: Thu, 12 Mar 2026 19:13:40 +0100 Subject: [PATCH 3/5] Add deployment process (#3751) --- .github/ISSUE_TEMPLATE/deploy_release.md | 50 ++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/deploy_release.md diff --git a/.github/ISSUE_TEMPLATE/deploy_release.md b/.github/ISSUE_TEMPLATE/deploy_release.md new file mode 100644 index 000000000..68557bf46 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/deploy_release.md @@ -0,0 +1,50 @@ +--- +name: Deploy Release +about: Execute tasks for deploying a new version in a fleet +title: 'Deploy release vX.X.X in waku.sandbox and/or status.prod fleet' +labels: deploy-release +assignees: '' + +--- + + + +### Link to the Release PR + + + +### Items to complete, in order + + + +- [ ] Receive sign-off from DST. + - [ ] Inform DST team about what are the expectations for this release. For example, if we expect higher, same or lower bandwidth consumption. Or a new protocol appears, etc. + - [ ] Ask DST to add a comment approving this deployment and add a link to the analysis report. + +- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). 
 + +- [ ] Deploy to status.prod + - [ ] Ask Status admin to add a comment approving this deployment to happen now. + - [ ] Update status.prod with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-status-prod/). + +- [ ] Update infra config + - [ ] Submit PRs into infra repos to adjust deprecated or changed arguments (review CHANGELOG.md for that release). And confirm the fleet can run after that. This requires coordination with infra team. + +### Reference Links + +- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/.github/ISSUE_TEMPLATE/prepare_release.md) +- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md) +- [Infra-role-nim-waku](https://github.com/status-im/infra-role-nim-waku) +- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) +- [Infra-Status](https://github.com/status-im/infra-status) +- [Jenkins](https://ci.infra.status.im/job/nim-waku/) +- [Fleets](https://fleets.waku.org/) +- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) +- [Kibana](https://kibana.infra.status.im/app/) From bc9454db5e0258eaa964f36c37d7204244ece9a5 Mon Sep 17 00:00:00 2001 From: Tanya S <120410716+stubbsta@users.noreply.github.com> Date: Thu, 12 Mar 2026 22:27:50 +0200 Subject: [PATCH 4/5] Chore: Simplify on chain group manager error handling (#3678) --- tests/node/test_wakunode_legacy_lightpush.nim | 7 +- tests/node/test_wakunode_lightpush.nim | 7 +- .../test_rln_group_manager_onchain.nim | 99 ++--- tests/waku_rln_relay/test_waku_rln_relay.nim | 26 +- .../test_wakunode_rln_relay.nim | 49 +-- tests/wakunode_rest/test_rest_relay.nim | 39 +- .../rln_keystore_generator.nim | 4 +- .../group_manager/group_manager_base.nim | 4 +- .../group_manager/on_chain/group_manager.nim | 358 +++++++++--------- .../group_manager/on_chain/retry_wrapper.nim | 41 +- waku/waku_rln_relay/rln_relay.nim | 4 +- 11 files changed, 288 insertions(+), 350 deletions(-) 
diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index 902464bcd..68c6cacde 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -134,11 +134,8 @@ suite "RLN Proofs as a Lightpush Service": let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 66b87b85e..b407327e3 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -137,11 +137,8 @@ suite "RLN Proofs as a Lightpush Service": let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index aac900911..29da94129 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -74,10 +74,11 @@ 
suite "Onchain group manager": raiseAssert "Expected error when keystore file doesn't exist" test "trackRootChanges: should guard against uninitialized state": - try: - discard manager.trackRootChanges() - except CatchableError: - check getCurrentExceptionMsg().len == 38 + let initializedResult = waitFor manager.trackRootChanges() + + check: + initializedResult.isErr() + initializedResult.error == "OnchainGroupManager is not initialized" test "trackRootChanges: should sync to the state of the group": let credentials = generateCredentials() @@ -86,10 +87,8 @@ suite "Onchain group manager": let merkleRootBefore = waitFor manager.fetchMerkleRoot() - try: - waitFor manager.register(credentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + (waitFor manager.register(credentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error discard waitFor withTimeout(trackRootChanges(manager), 15.seconds) @@ -110,13 +109,11 @@ suite "Onchain group manager": let merkleRootBefore = waitFor manager.fetchMerkleRoot() - try: - for i in 0 ..< credentials.len(): - info "Registering credential", index = i, credential = credentials[i] - waitFor manager.register(credentials[i], UserMessageLimit(20)) - discard waitFor manager.updateRoots() - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + for i in 0 ..< credentials.len(): + info "Registering credential", index = i, credential = credentials[i] + (waitFor manager.register(credentials[i], UserMessageLimit(20))).isOkOr: + assert false, "Failed to register credential " & $i & ": " & error + discard waitFor manager.updateRoots() let merkleRootAfter = waitFor manager.fetchMerkleRoot() @@ -127,16 +124,15 @@ suite "Onchain group manager": test "register: should guard against uninitialized state": let dummyCommitment = default(IDCommitment) - try: - waitFor manager.register( - 
RateCommitment( - idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(20) - ) + let res = waitFor manager.register( + RateCommitment( + idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(20) ) - except CatchableError: - assert true - except Exception: - assert false, "exception raised: " & getCurrentExceptionMsg() + ) + + check: + res.isErr() + res.error == "OnchainGroupManager is not initialized" test "register: should register successfully": # TODO :- similar to ```trackRootChanges: should fetch history correctly``` @@ -146,11 +142,8 @@ suite "Onchain group manager": let idCredentials = generateCredentials() let merkleRootBefore = waitFor manager.fetchMerkleRoot() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let merkleRootAfter = waitFor manager.fetchMerkleRoot() @@ -177,26 +170,25 @@ suite "Onchain group manager": manager.onRegister(callback) - try: + ( waitFor manager.register( RateCommitment( idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20) ) ) - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + ).isOkOr: + assert false, "error returned when calling register: " & error waitFor fut test "withdraw: should guard against uninitialized state": let idSecretHash = generateCredentials().idSecretHash - try: - waitFor manager.withdraw(idSecretHash) - except CatchableError: - assert true - except Exception: - assert false, "exception raised: " & getCurrentExceptionMsg() + let res = waitFor manager.withdraw(idSecretHash) + + check: + res.isErr() + res.error == "OnchainGroupManager is not initialized" test "validateRoot: should validate good root": let idCredentials = generateCredentials() @@ 
-217,10 +209,8 @@ suite "Onchain group manager": (waitFor manager.init()).isOkOr: raiseAssert $error - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned : " & getCurrentExceptionMsg() waitFor fut @@ -299,10 +289,8 @@ suite "Onchain group manager": manager.onRegister(callback) - try: - waitFor manager.register(credentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + (waitFor manager.register(credentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error waitFor fut let rootUpdated = waitFor manager.updateRoots() @@ -337,11 +325,8 @@ suite "Onchain group manager": let idCredential = generateCredentials() - try: - waitFor manager.register(idCredential, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling startGroupSync: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredential, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let messageBytes = "Hello".toBytes() @@ -395,14 +380,12 @@ suite "Onchain group manager": return callback - try: - manager.onRegister(generateCallback(futures, credentials)) + manager.onRegister(generateCallback(futures, credentials)) - for i in 0 ..< credentials.len(): - waitFor manager.register(credentials[i], UserMessageLimit(20)) - discard waitFor manager.updateRoots() - except Exception, CatchableError: - assert false, "exception raised: " & getCurrentExceptionMsg() + for i in 0 ..< credentials.len(): + (waitFor manager.register(credentials[i], UserMessageLimit(20))).isOkOr: + assert false, "Failed to register credential " & $i & ": " & error + discard waitFor manager.updateRoots() 
waitFor allFutures(futures) diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index d9fe0d890..e41b79608 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -242,11 +242,8 @@ suite "Waku rln relay": let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let epoch1 = wakuRlnRelay.getCurrentEpoch() @@ -301,11 +298,8 @@ suite "Waku rln relay": let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error # usually it's 20 seconds but we set it to 1 for testing purposes which make the test faster wakuRlnRelay.rlnMaxTimestampGap = 1 @@ -353,11 +347,9 @@ suite "Waku rln relay": let manager1 = cast[OnchainGroupManager](wakuRlnRelay1.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + "error returned when calling register: " & error let index2 = MembershipIndex(6) let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2) @@ 
-369,11 +361,9 @@ suite "Waku rln relay": let manager2 = cast[OnchainGroupManager](wakuRlnRelay2.groupManager) let idCredentials2 = generateCredentials() - try: - waitFor manager2.register(idCredentials2, UserMessageLimit(20)) - except Exception, CatchableError: + (waitFor manager2.register(idCredentials2, UserMessageLimit(20))).isOkOr: assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + "error returned when calling register: " & error # get the current epoch time let epoch = wakuRlnRelay1.getCurrentEpoch() diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index fcf97a671..79a4d6711 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -58,11 +58,8 @@ procSuite "WakuNode - RLN relay": let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 @@ -172,11 +169,8 @@ procSuite "WakuNode - RLN relay": let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node", 
node = 1, rootUpdated = rootUpdated1 @@ -192,11 +186,8 @@ procSuite "WakuNode - RLN relay": let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) let idCredentials2 = generateCredentials() - try: - waitFor manager2.register(idCredentials2, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager2.register(idCredentials2, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated2 = waitFor manager2.updateRoots() info "Updated root for node", node = 2, rootUpdated = rootUpdated2 @@ -212,11 +203,8 @@ procSuite "WakuNode - RLN relay": let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) let idCredentials3 = generateCredentials() - try: - waitFor manager3.register(idCredentials3, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager3.register(idCredentials3, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated3 = waitFor manager3.updateRoots() info "Updated root for node", node = 3, rootUpdated = rootUpdated3 @@ -333,11 +321,8 @@ procSuite "WakuNode - RLN relay": let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 @@ -448,11 +433,8 @@ procSuite "WakuNode - RLN relay": let manager1 = 
cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 @@ -620,11 +602,8 @@ procSuite "WakuNode - RLN relay": let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) let idCredentials1 = generateCredentials() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr: + assert false, "error returned when calling register: " & error let rootUpdated1 = waitFor manager1.updateRoots() info "Updated root for node1", rootUpdated1 diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index f16e5c4f4..efdd597ba 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -42,8 +42,8 @@ suite "Waku v2 Rest API - Relay": var manager {.threadVar.}: OnchainGroupManager setup: - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) teardown: stopAnvil(anvilProc) @@ -268,11 +268,8 @@ suite "Waku v2 Rest API - Relay": let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - 
"exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "Failed to register identity credentials" & getCurrentExceptionMsg() let rootUpdated = waitFor manager.updateRoots() info "Updated root for node", rootUpdated @@ -545,11 +542,8 @@ suite "Waku v2 Rest API - Relay": let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "Failed to register identity credentials" & getCurrentExceptionMsg() let rootUpdated = waitFor manager.updateRoots() info "Updated root for node", rootUpdated @@ -617,11 +611,8 @@ suite "Waku v2 Rest API - Relay": let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "Failed to register identity credentials" & getCurrentExceptionMsg() let rootUpdated = waitFor manager.updateRoots() info "Updated root for node", rootUpdated @@ -679,11 +670,8 @@ suite "Waku v2 Rest API - Relay": let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, 
"Failed to register identity credentials" & getCurrentExceptionMsg() let rootUpdated = waitFor manager.updateRoots() info "Updated root for node", rootUpdated @@ -754,11 +742,8 @@ suite "Waku v2 Rest API - Relay": let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + (waitFor manager.register(idCredentials, UserMessageLimit(20))).isOkOr: + assert false, "Failed to register identity credentials" & getCurrentExceptionMsg() let rootUpdated = waitFor manager.updateRoots() info "Updated root for node", rootUpdated diff --git a/tools/rln_keystore_generator/rln_keystore_generator.nim b/tools/rln_keystore_generator/rln_keystore_generator.nim index 85df37982..503e8d58e 100644 --- a/tools/rln_keystore_generator/rln_keystore_generator.nim +++ b/tools/rln_keystore_generator/rln_keystore_generator.nim @@ -73,7 +73,9 @@ proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = # 4. 
register on-chain try: - waitFor groupManager.register(credential, conf.userMessageLimit) + (waitFor groupManager.register(credential, conf.userMessageLimit)).isOkOr: + error "Failed to register on-chain", error = error + quit(QuitFailure) except Exception, CatchableError: error "failure while registering credentials on-chain", error = getCurrentExceptionMsg() diff --git a/waku/waku_rln_relay/group_manager/group_manager_base.nim b/waku/waku_rln_relay/group_manager/group_manager_base.nim index de2962e42..9c088d4c5 100644 --- a/waku/waku_rln_relay/group_manager/group_manager_base.nim +++ b/waku/waku_rln_relay/group_manager/group_manager_base.nim @@ -144,6 +144,4 @@ method generateProof*( return err("generateProof is not implemented") method isReady*(g: GroupManager): Future[bool] {.base, async.} = - raise newException( - CatchableError, "isReady proc for " & $g.type & " is not implemented yet" - ) + return true diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index 2ce7d4423..2e4882891 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -50,109 +50,85 @@ type proc fetchMerkleProofElements*( g: OnchainGroupManager ): Future[Result[seq[byte], string]] {.async.} = - try: - let membershipIndex = g.membershipIndex.get() - let index40 = stuint(membershipIndex, 40) + let membershipIndex = g.membershipIndex.get() + let index40 = stuint(membershipIndex, 40) - let methodSig = "getMerkleProof(uint40)" - var paddedParam = newSeq[byte](32) - let indexBytes = index40.toBytesBE() - for i in 0 ..< min(indexBytes.len, paddedParam.len): - paddedParam[paddedParam.len - indexBytes.len + i] = indexBytes[i] + let methodSig = "getMerkleProof(uint40)" + var paddedParam = newSeq[byte](32) + let indexBytes = index40.toBytesBE() + for i in 0 ..< min(indexBytes.len, paddedParam.len): + paddedParam[paddedParam.len - 
indexBytes.len + i] = indexBytes[i] - let response = await sendEthCallWithParams( - ethRpc = g.ethRpc.get(), - functionSignature = methodSig, - params = paddedParam, - fromAddress = g.ethRpc.get().defaultAccount, - toAddress = fromHex(Address, g.ethContractAddress), - chainId = g.chainId, - ) + let response = await sendEthCallWithParams( + ethRpc = g.ethRpc.get(), + functionSignature = methodSig, + params = paddedParam, + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) - return response - except CatchableError: - error "Failed to fetch Merkle proof elements", error = getCurrentExceptionMsg() - return err("Failed to fetch merkle proof elements: " & getCurrentExceptionMsg()) + return response proc fetchMerkleRoot*( g: OnchainGroupManager ): Future[Result[UInt256, string]] {.async.} = - try: - let merkleRoot = await sendEthCallWithoutParams( - ethRpc = g.ethRpc.get(), - functionSignature = "root()", - fromAddress = g.ethRpc.get().defaultAccount, - toAddress = fromHex(Address, g.ethContractAddress), - chainId = g.chainId, - ) - return merkleRoot - except CatchableError: - error "Failed to fetch Merkle root", error = getCurrentExceptionMsg() - return err("Failed to fetch merkle root: " & getCurrentExceptionMsg()) + let merkleRoot = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "root()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + return merkleRoot proc fetchNextFreeIndex*( g: OnchainGroupManager ): Future[Result[UInt256, string]] {.async.} = - try: - let nextFreeIndex = await sendEthCallWithoutParams( - ethRpc = g.ethRpc.get(), - functionSignature = "nextFreeIndex()", - fromAddress = g.ethRpc.get().defaultAccount, - toAddress = fromHex(Address, g.ethContractAddress), - chainId = g.chainId, - ) - return nextFreeIndex - except CatchableError: - error "Failed to fetch next free 
index", error = getCurrentExceptionMsg() - return err("Failed to fetch next free index: " & getCurrentExceptionMsg()) + let nextFreeIndex = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "nextFreeIndex()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + return nextFreeIndex proc fetchMembershipStatus*( g: OnchainGroupManager, idCommitment: IDCommitment ): Future[Result[bool, string]] {.async.} = - try: - let params = idCommitment.reversed() - let responseBytes = ( - await sendEthCallWithParams( - ethRpc = g.ethRpc.get(), - functionSignature = "isInMembershipSet(uint256)", - params = params, - fromAddress = g.ethRpc.get().defaultAccount, - toAddress = fromHex(Address, g.ethContractAddress), - chainId = g.chainId, - ) - ).valueOr: - return err("Failed to check membership: " & error) - - return ok(responseBytes.len == 32 and responseBytes[^1] == 1'u8) - except CatchableError: - error "Failed to fetch membership set membership", error = getCurrentExceptionMsg() - return err("Failed to fetch membership set membership: " & getCurrentExceptionMsg()) - -proc fetchMaxMembershipRateLimit*( - g: OnchainGroupManager -): Future[Result[UInt256, string]] {.async.} = - try: - let maxMembershipRateLimit = await sendEthCallWithoutParams( + let params = idCommitment.reversed() + let responseBytes = ( + await sendEthCallWithParams( ethRpc = g.ethRpc.get(), - functionSignature = "maxMembershipRateLimit()", + functionSignature = "isInMembershipSet(uint256)", + params = params, fromAddress = g.ethRpc.get().defaultAccount, toAddress = fromHex(Address, g.ethContractAddress), chainId = g.chainId, ) - return maxMembershipRateLimit - except CatchableError: - error "Failed to fetch max membership rate limit", error = getCurrentExceptionMsg() - return err("Failed to fetch max membership rate limit: " & getCurrentExceptionMsg()) + ).valueOr: + return err("Failed to check 
membership: " & error) -template initializedGuard(g: OnchainGroupManager): untyped = + return ok(responseBytes.len == 32 and responseBytes[^1] == 1'u8) + +proc fetchMaxMembershipRateLimit*( + g: OnchainGroupManager +): Future[Result[UInt256, string]] {.async.} = + let maxMembershipRateLimit = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "maxMembershipRateLimit()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + + return maxMembershipRateLimit + +proc checkInitialized(g: OnchainGroupManager): Result[void, string] = if not g.initialized: - raise newException(CatchableError, "OnchainGroupManager is not initialized") - -template retryWrapper( - g: OnchainGroupManager, res: auto, errStr: string, body: untyped -): auto = - retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction): - body + return err("OnchainGroupManager is not initialized") + return ok() proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} = let rootRes = (await g.fetchMerkleRoot()).valueOr: @@ -172,40 +148,37 @@ proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} = return false -proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} = - try: - initializedGuard(g) - const rpcDelay = 5.seconds +proc trackRootChanges*(g: OnchainGroupManager): Future[Result[void, string]] {.async.} = + ?checkInitialized(g) - while true: - await sleepAsync(rpcDelay) - let rootUpdated = await g.updateRoots() + const rpcDelay = 5.seconds - if rootUpdated: - ## The membership set on-chain has changed (some new members have joined or some members have left) - if g.membershipIndex.isSome(): - ## A membership index exists only if the node has registered with RLN. - ## Non-registered nodes cannot have Merkle proof elements. 
- let proofResult = await g.fetchMerkleProofElements() - if proofResult.isErr(): - error "Failed to fetch Merkle proof", error = proofResult.error - else: - g.merkleProofCache = proofResult.get() + while true: + await sleepAsync(rpcDelay) + let rootUpdated = await g.updateRoots() - let nextFreeIndex = (await g.fetchNextFreeIndex()).valueOr: - error "Failed to fetch next free index", error = error - raise - newException(CatchableError, "Failed to fetch next free index: " & error) + if rootUpdated: + ## The membership set on-chain has changed (some new members have joined or some members have left) + if g.membershipIndex.isSome(): + ## A membership index exists only if the node has registered with RLN. + ## Non-registered nodes cannot have Merkle proof elements. + let proofResult = await g.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + else: + g.merkleProofCache = proofResult.get() - let memberCount = cast[int64](nextFreeIndex) - waku_rln_number_registered_memberships.set(float64(memberCount)) - except CatchableError: - error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg() + let nextFreeIndex = (await g.fetchNextFreeIndex()).valueOr: + error "Failed to fetch next free index", error = error + return err("Failed to fetch next free index: " & error) + + let memberCount = cast[int64](nextFreeIndex) + waku_rln_number_registered_memberships.set(float64(memberCount)) method register*( g: OnchainGroupManager, rateCommitment: RateCommitment -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) +): Future[Result[void, string]] {.async.} = + ?checkInitialized(g) try: let leaf = rateCommitment.toLeaf().get() @@ -214,33 +187,40 @@ method register*( info "registering member via callback", rateCommitment = leaf, index = idx await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)]) g.latestIndex.inc() - except CatchableError: - raise 
newException(ValueError, getCurrentExceptionMsg()) + except Exception as e: + return err("Failed to call register callback: " & e.msg) + + return ok() method register*( g: OnchainGroupManager, identityCredential: IdentityCredential, userMessageLimit: UserMessageLimit, -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) +): Future[Result[void, string]] {.async.} = + ?checkInitialized(g) let ethRpc = g.ethRpc.get() let wakuRlnContract = g.wakuRlnContract.get() - var gasPrice: int - g.retryWrapper(gasPrice, "Failed to get gas price"): - let fetchedGasPrice = uint64(await ethRpc.provider.eth_gasPrice()) - ## Multiply by 2 to speed up the transaction - ## Check for overflow when casting to int - if fetchedGasPrice > uint64(high(int) div 2): - warn "Gas price overflow detected, capping at maximum int value", - fetchedGasPrice = fetchedGasPrice, maxInt = high(int) - high(int) - else: - let calculatedGasPrice = int(fetchedGasPrice) * 2 - debug "Gas price calculated", - fetchedGasPrice = fetchedGasPrice, gasPrice = calculatedGasPrice - calculatedGasPrice + let gasPrice = ( + await retryWrapper( + RetryStrategy.new(), + "Failed to get gas price", + proc(): Future[int] {.async.} = + let fetchedGasPrice = uint64(await ethRpc.provider.eth_gasPrice()) + if fetchedGasPrice > uint64(high(int) div 2): + warn "Gas price overflow detected, capping at maximum int value", + fetchedGasPrice = fetchedGasPrice, maxInt = high(int) + return high(int) + else: + let calculatedGasPrice = int(fetchedGasPrice) * 2 + debug "Gas price calculated", + fetchedGasPrice = fetchedGasPrice, gasPrice = calculatedGasPrice + return calculatedGasPrice, + ) + ).valueOr: + return err("Failed to get gas price: " & error) + let idCommitmentHex = identityCredential.idCommitment.inHex() debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex let idCommitment = identityCredential.idCommitment.toUInt256() @@ -249,27 +229,37 @@ method register*( idCommitment = idCommitment, 
userMessageLimit = userMessageLimit, idCommitmentsToErase = idCommitmentsToErase - var txHash: TxHash - g.retryWrapper(txHash, "Failed to register the member"): - await wakuRlnContract - .register(idCommitment, userMessageLimit.stuint(32), idCommitmentsToErase) - .send(gasPrice = gasPrice) + let txHash = ( + await retryWrapper( + RetryStrategy.new(), + "Failed to register the member", + proc(): Future[TxHash] {.async.} = + return await wakuRlnContract + .register(idCommitment, userMessageLimit.stuint(32), idCommitmentsToErase) + .send(gasPrice = gasPrice), + ) + ).valueOr: + return err("Failed to register member: " & error) # wait for the transaction to be mined - var tsReceipt: ReceiptObject - g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): - await ethRpc.getMinedTransactionReceipt(txHash) + let tsReceipt = ( + await retryWrapper( + RetryStrategy.new(), + "Failed to get the transaction receipt", + proc(): Future[ReceiptObject] {.async.} = + return await ethRpc.getMinedTransactionReceipt(txHash), + ) + ).valueOr: + return err("Failed to get transaction receipt: " & error) debug "registration transaction mined", txHash = txHash g.registrationTxHash = some(txHash) # the receipt topic holds the hash of signature of the raised events debug "ts receipt", receipt = tsReceipt[] if tsReceipt.status.isNone(): - raise newException(ValueError, "Transaction failed: status is None") + return err("Transaction failed: status is None") if tsReceipt.status.get() != 1.Quantity: - raise newException( - ValueError, "Transaction failed with status: " & $tsReceipt.status.get() - ) + return err("Transaction failed with status: " & $tsReceipt.status.get()) ## Search through all transaction logs to find the MembershipRegistered event let expectedEventSignature = cast[FixedBytes[32]](keccak.keccak256.digest( @@ -283,9 +273,7 @@ method register*( break if membershipRegisteredLog.isNone(): - raise newException( - ValueError, "register: MembershipRegistered event not found 
in transaction logs" - ) + return err("register: MembershipRegistered event not found in transaction logs") let registrationLog = membershipRegisteredLog.get() @@ -309,20 +297,28 @@ method register*( if g.registerCb.isSome(): let member = Membership(rateCommitment: rateCommitment, index: g.latestIndex) - await g.registerCb.get()(@[member]) + try: + await g.registerCb.get()(@[member]) + except Exception as e: + return err("Failed to call register callback: " & e.msg) g.latestIndex.inc() - return + return ok() method withdraw*( g: OnchainGroupManager, idCommitment: IDCommitment -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) # TODO: after slashing is enabled on the contract +): Future[Result[void, string]] {.async.} = + checkInitialized(g).isOkOr: + return err(error) + return ok() method withdrawBatch*( g: OnchainGroupManager, idCommitments: seq[IDCommitment] -): Future[void] {.async: (raises: [Exception]).} = - initializedGuard(g) +): Future[Result[void, string]] {.async.} = + checkInitialized(g).isOkOr: + return err(error) + + return ok() proc getRootFromProofAndIndex( g: OnchainGroupManager, elements: seq[byte], bits: seq[byte] @@ -354,7 +350,7 @@ method generateProof*( epoch: Epoch, messageId: MessageId, rlnIdentifier = DefaultRlnIdentifier, -): GroupManagerResult[RateLimitProof] {.gcsafe, raises: [].} = +): GroupManagerResult[RateLimitProof] {.gcsafe.} = ## Generates an RLN proof using the cached Merkle proof and custom witness # Ensure identity credentials and membership index are set if g.idCredentials.isNone(): @@ -452,7 +448,7 @@ method generateProof*( method verifyProof*( g: OnchainGroupManager, input: seq[byte], proof: RateLimitProof -): GroupManagerResult[bool] {.gcsafe, raises: [].} = +): GroupManagerResult[bool] {.gcsafe.} = ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots -- var normalizedProof = proof @@ -492,25 +488,31 @@ method onWithdraw*(g: OnchainGroupManager, cb: OnWithdrawCallback) 
{.gcsafe.} = proc establishConnection( g: OnchainGroupManager ): Future[GroupManagerResult[Web3]] {.async.} = - var ethRpc: Web3 + let ethRpc = ( + await retryWrapper( + RetryStrategy.new(), + "Failed to connect to the Ethereum client", + proc(): Future[Web3] {.async.} = + var innerEthRpc: Web3 + var connected = false + for clientUrl in g.ethClientUrls: + ## We give a chance to the user to provide multiple clients + ## and we try to connect to each of them + try: + innerEthRpc = await newWeb3(clientUrl) + connected = true + break + except CatchableError: + error "failed connect Eth client", error = getCurrentExceptionMsg() - g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"): - var innerEthRpc: Web3 - var connected = false - for clientUrl in g.ethClientUrls: - ## We give a chance to the user to provide multiple clients - ## and we try to connect to each of them - try: - innerEthRpc = await newWeb3(clientUrl) - connected = true - break - except CatchableError: - error "failed connect Eth client", error = getCurrentExceptionMsg() + ## this exception is handled by the retrywrapper + if not connected: + raise newException(CatchableError, "all failed") - if not connected: - raise newException(CatchableError, "all failed") - - innerEthRpc + return innerEthRpc, + ) + ).valueOr: + return err("Failed to establish Ethereum connection: " & error) return ok(ethRpc) @@ -519,9 +521,15 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} let ethRpc: Web3 = (await establishConnection(g)).valueOr: return err("failed to connect to Ethereum clients: " & $error) - var fetchedChainId: UInt256 - g.retryWrapper(fetchedChainId, "Failed to get the chain id"): - await ethRpc.provider.eth_chainId() + let fetchedChainId = ( + await retryWrapper( + RetryStrategy.new(), + "Failed to get the chain id", + proc(): Future[UInt256] {.async.} = + return await ethRpc.provider.eth_chainId(), + ) + ).valueOr: + return err("Failed to get chain id: " & error) 
# Set the chain id if g.chainId == 0: @@ -595,8 +603,10 @@ method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} proc onDisconnect() {.async.} = error "Ethereum client disconnected" - var newEthRpc: Web3 = (await g.establishConnection()).valueOr: - g.onFatalErrorAction("failed to connect to Ethereum clients onDisconnect") + let newEthRpc: Web3 = (await g.establishConnection()).valueOr: + error "Fatal: failed to reconnect to Ethereum clients after disconnect", + error = error + g.onFatalErrorAction("failed to reconnect to Ethereum clients: " & error) return newEthRpc.ondisconnect = ethRpc.ondisconnect @@ -616,12 +626,14 @@ method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} = g.initialized = false method isReady*(g: OnchainGroupManager): Future[bool] {.async.} = - initializedGuard(g) + checkInitialized(g).isOkOr: + return false if g.ethRpc.isNone(): + error "Ethereum RPC client is not configured" return false if g.wakuRlnContract.isNone(): + error "Waku RLN contract is not configured" return false - return true diff --git a/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim b/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim index df8716279..97bc0c435 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim @@ -1,36 +1,31 @@ -import ../../../common/error_handling import chronos import results +const + DefaultRetryDelay* = 4000.millis + DefaultRetryCount* = 15'u + type RetryStrategy* = object - shouldRetry*: bool retryDelay*: Duration retryCount*: uint proc new*(T: type RetryStrategy): RetryStrategy = - return RetryStrategy(shouldRetry: true, retryDelay: 4000.millis, retryCount: 15) + return RetryStrategy(retryDelay: DefaultRetryDelay, retryCount: DefaultRetryCount) -template retryWrapper*( - res: auto, - retryStrategy: RetryStrategy, - errStr: string, - errCallback: OnFatalErrorHandler, - body: untyped, -): auto = - if 
errCallback == nil: - raise newException(CatchableError, "Ensure that the errCallback is set") +proc retryWrapper*[T]( + retryStrategy: RetryStrategy, errStr: string, body: proc(): Future[T] {.async.} +): Future[Result[T, string]] {.async.} = var retryCount = retryStrategy.retryCount - var shouldRetry = retryStrategy.shouldRetry - var exceptionMessage = "" + var lastError = "" - while shouldRetry and retryCount > 0: + while retryCount > 0: try: - res = body - shouldRetry = false - except: + let value = await body() + return ok(value) + except CatchableError as e: retryCount -= 1 - exceptionMessage = getCurrentExceptionMsg() - await sleepAsync(retryStrategy.retryDelay) - if shouldRetry: - errCallback(errStr & ": " & exceptionMessage) - return + lastError = e.msg + if retryCount > 0: + await sleepAsync(retryStrategy.retryDelay) + + return err(errStr & ": " & lastError) diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index 5c893e2a2..8559dcd66 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -68,7 +68,7 @@ type WakuRLNRelay* = ref object of RootObj onFatalErrorAction*: OnFatalErrorHandler nonceManager*: NonceManager epochMonitorFuture*: Future[void] - rootChangesFuture*: Future[void] + rootChangesFuture*: Future[Result[void, string]] brokerCtx*: BrokerContext proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch = @@ -467,7 +467,7 @@ proc mount( return ok(wakuRlnRelay) -proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} = +proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async.} = ## returns true if the rln-relay protocol is ready to relay messages ## returns false otherwise From 6a1cf578ef65e427a66a1e4eec24b57b35900ef5 Mon Sep 17 00:00:00 2001 From: Ivan Folgueira Bande Date: Fri, 13 Mar 2026 12:10:40 +0100 Subject: [PATCH 5/5] Revert "Release : patch release v0.37.1-beta (#3661)" We are going to update the CHANGELOG with another PR today This reverts 
commit 868d43164e9b5ad0c3a856e872448e9e80531e0c. --- CHANGELOG.md | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c80a3b79..61e818afd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,4 @@ -## v0.37.1-beta (2025-12-10) - -### Bug Fixes - -- Remove ENR cache from peer exchange ([#3652](https://github.com/logos-messaging/logos-messaging-nim/pull/3652)) ([7920368a](https://github.com/logos-messaging/logos-messaging-nim/commit/7920368a36687cd5f12afa52d59866792d8457ca)) - -## v0.37.0-beta (2025-10-01) +## v0.37.0 (2025-10-01) ### Notes