diff --git a/.github/ISSUE_TEMPLATE/prepare_beta_release.md b/.github/ISSUE_TEMPLATE/prepare_beta_release.md new file mode 100644 index 000000000..3c4e76854 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/prepare_beta_release.md @@ -0,0 +1,63 @@ +--- +name: Prepare Beta Release +about: Execute tasks for the creation and publishing of a new beta release +title: 'Prepare beta release 0.0.0' +labels: beta-release +assignees: '' + +--- + + + +### Items to complete + +All items below are to be completed by the owner of the given release. + +- [ ] Create release branch with major and minor only ( e.g. release/v0.X ) if it doesn't exist. +- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-beta-rc.0`, `v0.X.0-beta-rc.1`, ... `v0.X.0-beta-rc.N`). +- [ ] Generate and edit release notes in CHANGELOG.md. + +- [ ] **Validation of release candidate** + - [ ] **Automated testing** + - [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate. + - [ ] **Waku fleet testing** + - [ ] Deploy the release candidate to `waku.test` through [deploy-waku-test job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it). + - After completion, disable fleet so that daily CI does not override your release candidate. + - Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image. + - Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab). + - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`. + - Set time range to "Last 30 days" (or since last release). 
+ - Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`. + - Document any crashes or errors found. + - [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the [deploy-waku-sandbox job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). + - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most likely, if there are no crashes or errors in `waku.test`, there will be none in `waku.sandbox`. + - [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit. + +- [ ] **Proceed with release** + + - [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`. + - [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release. + - [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work. + - [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work. + - [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases). + - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. 
+ +- [ ] **Promote release to fleets** + - [ ] Ask the PM lead to announce the release. + - [ ] Update infra config with any deprecated arguments or changed options. + - [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). + +### Links + +- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md) +- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md) +- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) +- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) +- [Jenkins](https://ci.infra.status.im/job/nim-waku/) +- [Fleets](https://fleets.waku.org/) +- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) +- [Kibana](https://kibana.infra.status.im/app/) diff --git a/.github/ISSUE_TEMPLATE/prepare_full_release.md b/.github/ISSUE_TEMPLATE/prepare_full_release.md new file mode 100644 index 000000000..4df808bd4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/prepare_full_release.md @@ -0,0 +1,83 @@ +--- +name: Prepare Full Release +about: Execute tasks for the creation and publishing of a new full release +title: 'Prepare full release 0.0.0' +labels: full-release +assignees: '' + +--- + + + +### Items to complete + +All items below are to be completed by the owner of the given release. + +- [ ] Create release branch with major and minor only ( e.g. release/v0.X ) if it doesn't exist. +- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-rc.0`, `v0.X.0-rc.1`, ... `v0.X.0-rc.N`). +- [ ] Generate and edit release notes in CHANGELOG.md. + +- [ ] **Validation of release candidate** + + - [ ] **Automated testing** + - [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate. 
+ + - [ ] **Waku fleet testing** + - [ ] Deploy the release candidate to `waku.test` fleet. + - Start the [deployment job](https://ci.infra.status.im/job/nim-waku/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it). + - After completion, disable fleet so that daily CI does not override your release candidate. + - Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image. + - Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab). + - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`. + - Set time range to "Last 30 days" (or since last release). + - Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`. + - Document any crashes or errors found. + - [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [deployment job](https://ci.infra.status.im/job/nim-waku/). + - [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most likely, if there are no crashes or errors in `waku.test`, there will be none in `waku.sandbox`. + - [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit. + + - [ ] **QA and DST testing** + - [ ] Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams. + - [ ] Vac-DST: An additional report is needed ([see this example](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f)). 
Inform the DST team of the expectations for this RC — for example, whether we expect higher or lower bandwidth consumption. + + - [ ] **Status fleet testing** + - [ ] Deploy release candidate to `status.staging` + - [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue. + - [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client. + - 1:1 Chats with each other + - Send and receive messages in a community + - Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store + - [ ] Perform checks based on _end user impact_ + - [ ] Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues on their Discord server or in the [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (this is not a blocking point.) + - [ ] Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested + - [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging` + - [ ] Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC. + - [ ] **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities. + +- [ ] **Proceed with release** + + - [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-rc.N`). 
+ - [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/logos-delivery-simulator) according to the new release. + - [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work. + - [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work. + - [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases). + - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. + +- [ ] **Promote release to fleets** + - [ ] Ask the PM lead to announce the release. + - [ ] Update infra config with any deprecated arguments or changed options. 
+ +### Links + +- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md) +- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md) +- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) +- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) +- [Jenkins](https://ci.infra.status.im/job/nim-waku/) +- [Fleets](https://fleets.waku.org/) +- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) +- [Kibana](https://kibana.infra.status.im/app/) diff --git a/.github/ISSUE_TEMPLATE/prepare_release.md b/.github/ISSUE_TEMPLATE/prepare_release.md deleted file mode 100644 index 9553d5685..000000000 --- a/.github/ISSUE_TEMPLATE/prepare_release.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -name: Prepare release -about: Execute tasks for the creation and publishing of a new release -title: 'Prepare release 0.0.0' -labels: release -assignees: '' - ---- - - - -### Items to complete - -All items below are to be completed by the owner of the given release. - -- [ ] Create release branch -- [ ] Assign release candidate tag to the release branch HEAD. e.g. v0.30.0-rc.0 -- [ ] Generate and edit releases notes in CHANGELOG.md -- [ ] Review possible update of [config-options](https://github.com/waku-org/docs.waku.org/blob/develop/docs/guides/nwaku/config-options.md) -- [ ] _End user impact_: Summarize impact of changes on Status end users (can be a comment in this issue). -- [ ] **Validate release candidate** - - [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work - -- [ ] Automated testing - - [ ] Ensures js-waku tests are green against release candidate - - [ ] Ask Vac-QA and Vac-DST to perform available tests against release candidate - - [ ] Vac-QA - - [ ] Vac-DST (we need additional report. 
see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f)) - - - [ ] **On Waku fleets** - - [ ] Lock `waku.test` fleet to release candidate version - - [ ] Continuously stress `waku.test` fleet for a week (e.g. from `wakudev`) - - [ ] Search _Kibana_ logs from the previous month (since last release was deployed), for possible crashes or errors in `waku.test` and `waku.sandbox`. - - Most relevant logs are `(fleet: "waku.test" OR fleet: "waku.sandbox") AND message: "SIGSEGV"` - - [ ] Run release candidate with `waku-simulator`, ensure that nodes connected to each other - - [ ] Unlock `waku.test` to resume auto-deployment of latest `master` commit - - - [ ] **On Status fleet** - - [ ] Deploy release candidate to `status.staging` - - [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue. - - [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client. - - [ ] 1:1 Chats with each other - - [ ] Send and receive messages in a community - - [ ] Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store - - [ ] Perform checks based _end user impact_ - - [ ] Inform other (Waku and Status) CCs to point their instance to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (not blocking point.) 
- - [ ] Ask Status-QA to perform sanity checks (as described above) + checks based on _end user impact_; do specify the version being tested - - [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging` - - [ ] Get other CCs sign-off: they comment on this PR "used app for a week, no problem", or problem reported, resolved and new RC - - [ ] **Get Status-QA sign-off**. Ensuring that `status.test` update will not disturb ongoing activities. - -- [ ] **Proceed with release** - - - [ ] Assign a release tag to the same commit that contains the validated release-candidate tag - - [ ] Create GitHub release - - [ ] Deploy the release to DockerHub - - [ ] Announce the release - -- [ ] **Promote release to fleets**. - - [ ] Update infra config with any deprecated arguments or changed options - - [ ] [Deploy final release to `waku.sandbox` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox) - - [ ] [Deploy final release to `status.staging` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-staging/) - - [ ] [Deploy final release to `status.prod` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-test/) - -- [ ] **Post release** - - [ ] Submit a PR from the release branch to master. Important to commit the PR with "create a merge commit" option. - - [ ] Update waku-org/nwaku-compose with the new release version. - - [ ] Update version in js-waku repo. [update only this](https://github.com/waku-org/js-waku/blob/7c0ce7b2eca31cab837da0251e1e4255151be2f7/.github/workflows/ci.yml#L135) by submitting a PR. 
diff --git a/.github/workflows/ci-daily.yml b/.github/workflows/ci-daily.yml new file mode 100644 index 000000000..b442014a6 --- /dev/null +++ b/.github/workflows/ci-daily.yml @@ -0,0 +1,79 @@ +name: Daily logos-delivery CI + +on: + schedule: + - cron: '30 6 * * *' + +env: + NPROC: 2 + MAKEFLAGS: "-j${NPROC}" + NIMFLAGS: "--parallelBuild:${NPROC} --colors:off -d:chronicles_colors:none" + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-22.04, macos-15] + runs-on: ${{ matrix.os }} + timeout-minutes: 45 + + name: build-${{ matrix.os }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get submodules hash + id: submodules + run: | + echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + + - name: Make update + run: make update + + - name: Build binaries + run: make V=1 QUICK_AND_DIRTY_COMPILER=1 examples tools + + - name: Notify Discord + if: always() + env: + DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} + run: | + STATUS="${{ job.status }}" + OS="${{ matrix.os }}" + REPO="${{ github.repository }}" + RUN_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" + + if [ "$STATUS" = "success" ]; then + COLOR=3066993 + TITLE="✅ CI Success" + else + COLOR=15158332 + TITLE="❌ CI Failed" + fi + + curl -H "Content-Type: application/json" \ + -X POST \ + -d "{ + \"embeds\": [{ + \"title\": \"$TITLE\", + \"color\": $COLOR, + \"fields\": [ + {\"name\": \"Repository\", \"value\": \"$REPO\", \"inline\": true}, + {\"name\": \"OS\", \"value\": \"$OS\", \"inline\": true}, + {\"name\": \"Status\", \"value\": \"$STATUS\", \"inline\": true} + ], + \"url\": \"$RUN_URL\", + \"footer\": {\"text\": \"Daily logos-delivery CI\"} + }] + }" \ + "$DISCORD_WEBHOOK_URL" + diff --git 
a/.github/workflows/ci-nix.yml b/.github/workflows/ci-nix.yml new file mode 100644 index 000000000..8fc7ac985 --- /dev/null +++ b/.github/workflows/ci-nix.yml @@ -0,0 +1,48 @@ +name: ci / nix +permissions: + contents: read + pull-requests: read + checks: write +on: + pull_request: + branches: [master] + +jobs: + build: + strategy: + fail-fast: false + matrix: + system: + - aarch64-darwin + - x86_64-linux + nixpkg: + - libwaku + - libwaku-android-arm64 + - wakucanary + + exclude: + # Android SDK limitation + - system: aarch64-darwin + nixpkg: libwaku-android-arm64 + + include: + - system: aarch64-darwin + runs_on: [self-hosted, macOS, ARM64] + + - system: x86_64-linux + runs_on: [self-hosted, Linux, X64] + + name: '${{ matrix.system }} / ${{ matrix.nixpkg }}' + runs-on: ${{ matrix.runs_on }} + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + + - name: 'Run Nix build for {{ matrix.nixpkg }}' + shell: bash + run: nix build -L '.?submodules=1#${{ matrix.nixpkg }}' + + - name: 'Show result contents' + shell: bash + run: find result -type f diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5cf64b66a..3c84f5c6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,9 +76,12 @@ jobs: .git/modules key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + - name: Make update + run: make update + - name: Build binaries - run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools - + run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all + build-windows: needs: changes if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }} @@ -114,6 +117,9 @@ jobs: .git/modules key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + - name: Make update + run: make update + - name: Run tests run: | postgres_enabled=0 @@ -121,7 +127,7 @@ jobs: sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18 postgres_enabled=1 fi - + export MAKEFLAGS="-j1" 
export NIMFLAGS="--colors:off -d:chronicles_colors:none" export USE_LIBBACKTRACE=0 @@ -132,12 +138,12 @@ jobs: build-docker-image: needs: changes if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }} - uses: waku-org/nwaku/.github/workflows/container-image.yml@master + uses: logos-messaging/logos-delivery/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039 secrets: inherit nwaku-nwaku-interop-tests: needs: build-docker-image - uses: waku-org/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1 + uses: logos-messaging/logos-delivery-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE with: node_nwaku: ${{ needs.build-docker-image.outputs.image }} @@ -145,14 +151,14 @@ jobs: js-waku-node: needs: build-docker-image - uses: waku-org/js-waku/.github/workflows/test-node.yml@master + uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node js-waku-node-optional: needs: build-docker-image - uses: waku-org/js-waku/.github/workflows/test-node.yml@master + uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node-optional diff --git a/.github/workflows/container-image.yml b/.github/workflows/container-image.yml index cfa66d20a..2bc08be2f 100644 --- a/.github/workflows/container-image.yml +++ b/.github/workflows/container-image.yml @@ -41,7 +41,7 @@ jobs: env: QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} QUAY_USER: ${{ secrets.QUAY_USER }} - + - name: Checkout code if: ${{ steps.secrets.outcome == 'success' }} uses: actions/checkout@v4 @@ -65,6 +65,7 @@ jobs: id: build if: ${{ steps.secrets.outcome == 'success' }} run: | + make update make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres 
-d:chronicles_colors:none" wakunode2 diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index fe108e616..e145e28ae 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -47,7 +47,7 @@ jobs: - name: prep variables id: vars run: | - ARCH=${{matrix.arch}} + ARCH=${{matrix.arch}} echo "arch=${ARCH}" >> $GITHUB_OUTPUT @@ -91,14 +91,14 @@ jobs: build-docker-image: needs: tag-name - uses: waku-org/nwaku/.github/workflows/container-image.yml@master + uses: logos-messaging/logos-delivery/.github/workflows/container-image.yml@master with: image_tag: ${{ needs.tag-name.outputs.tag }} secrets: inherit js-waku-node: needs: build-docker-image - uses: waku-org/js-waku/.github/workflows/test-node.yml@master + uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node @@ -106,7 +106,7 @@ jobs: js-waku-node-optional: needs: build-docker-image - uses: waku-org/js-waku/.github/workflows/test-node.yml@master + uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master with: nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} test_type: node-optional @@ -150,7 +150,7 @@ jobs: -u $(id -u) \ docker.io/wakuorg/sv4git:latest \ release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\ - sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' > release_notes.md + sed -E 's@#([0-9]+)@[#\1](https://github.com/logos-messaging/logos-delivery/issues/\1)@g' > release_notes.md sed -i "s/^## .*/Generated at $(date)/" release_notes.md diff --git a/.github/workflows/release-assets.yml b/.github/workflows/release-assets.yml index c6cfbd680..50e3c4c3d 100644 --- a/.github/workflows/release-assets.yml +++ b/.github/workflows/release-assets.yml @@ -41,25 +41,84 @@ jobs: .git/modules key: ${{ runner.os 
}}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }} - - name: prep variables + - name: Get tag + id: version + run: | + # Use full tag, e.g., v0.37.0 + echo "version=${GITHUB_REF_NAME}" >> $GITHUB_OUTPUT + + - name: Prep variables id: vars run: | - NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]") + VERSION=${{ steps.version.outputs.version }} - echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT + NWAKU_ARTIFACT_NAME=$(echo "waku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]") + echo "waku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT - - name: Install dependencies + if [[ "${{ runner.os }}" == "Linux" ]]; then + LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-${{runner.os}}-linux.deb" | tr "[:upper:]" "[:lower:]") + fi + + if [[ "${{ runner.os }}" == "macOS" ]]; then + LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-macos.tar.gz" | tr "[:upper:]" "[:lower:]") + fi + + echo "libwaku=${LIBWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT + + - name: Install build dependencies + run: | + if [[ "${{ runner.os }}" == "Linux" ]]; then + sudo apt-get update && sudo apt-get install -y build-essential dpkg-dev + fi + + - name: Build Waku artifacts run: | OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux") make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2 make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2 - tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/ + tar -cvzf ${{steps.vars.outputs.waku}} ./build/ - - name: Upload asset + make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false libwaku + 
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false STATIC=1 libwaku + + - name: Create distributable libwaku package + run: | + VERSION=${{ steps.version.outputs.version }} + + if [[ "${{ runner.os }}" == "Linux" ]]; then + rm -rf pkg + mkdir -p pkg/DEBIAN pkg/usr/local/lib pkg/usr/local/include + cp build/libwaku.so pkg/usr/local/lib/ + cp build/libwaku.a pkg/usr/local/lib/ + cp library/libwaku.h pkg/usr/local/include/ + + echo "Package: waku" >> pkg/DEBIAN/control + echo "Version: ${VERSION}" >> pkg/DEBIAN/control + echo "Priority: optional" >> pkg/DEBIAN/control + echo "Section: libs" >> pkg/DEBIAN/control + echo "Architecture: ${{matrix.arch}}" >> pkg/DEBIAN/control + echo "Maintainer: Waku Team " >> pkg/DEBIAN/control + echo "Description: Waku library" >> pkg/DEBIAN/control + + dpkg-deb --build pkg ${{steps.vars.outputs.libwaku}} + fi + + if [[ "${{ runner.os }}" == "macOS" ]]; then + tar -cvzf ${{steps.vars.outputs.libwaku}} ./build/libwaku.dylib ./build/libwaku.a ./library/libwaku.h + fi + + - name: Upload waku artifact uses: actions/upload-artifact@v4.4.0 with: - name: ${{steps.vars.outputs.nwaku}} - path: ${{steps.vars.outputs.nwaku}} + name: waku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }} + path: ${{ steps.vars.outputs.waku }} + if-no-files-found: error + + - name: Upload libwaku artifact + uses: actions/upload-artifact@v4.4.0 + with: + name: libwaku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }} + path: ${{ steps.vars.outputs.libwaku }} if-no-files-found: error diff --git a/.github/workflows/windows-build.yml b/.github/workflows/windows-build.yml index ed6d2cb17..9c1b1eab0 100644 --- a/.github/workflows/windows-build.yml +++ b/.github/workflows/windows-build.yml @@ -33,6 +33,7 @@ jobs: make cmake upx + unzip mingw-w64-x86_64-rust mingw-w64-x86_64-postgresql mingw-w64-x86_64-gcc @@ -44,6 +45,12 @@ jobs: 
mingw-w64-x86_64-cmake mingw-w64-x86_64-llvm mingw-w64-x86_64-clang + mingw-w64-x86_64-nasm + + - name: Manually install nasm + run: | + bash scripts/install_nasm_in_windows.sh + source $HOME/.bashrc - name: Add UPX to PATH run: | @@ -54,7 +61,7 @@ jobs: - name: Verify dependencies run: | - which upx gcc g++ make cmake cargo rustc python + which upx gcc g++ make cmake cargo rustc python nasm - name: Updating submodules run: git submodule update --init --recursive diff --git a/.gitignore b/.gitignore index 7430c3e99..5222a0d5e 100644 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,10 @@ nimbus-build-system.paths /examples/nodejs/build/ /examples/rust/target/ +# Xcode user data +xcuserdata/ +*.xcuserstate + # Coverage coverage_html_report/ @@ -79,3 +83,11 @@ waku_handler.moc.cpp # Nix build result result + +# llms +AGENTS.md +nimble.develop +nimble.paths +nimbledeps + +**/anvil_state/state-deployed-contracts-mint-and-approved.json diff --git a/.gitmodules b/.gitmodules index b7e52550a..6a63491e3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -181,6 +181,17 @@ branch = master [submodule "vendor/waku-rlnv2-contract"] path = vendor/waku-rlnv2-contract - url = https://github.com/waku-org/waku-rlnv2-contract.git + url = https://github.com/logos-messaging/waku-rlnv2-contract.git + ignore = untracked + branch = master +[submodule "vendor/nim-lsquic"] + path = vendor/nim-lsquic + url = https://github.com/vacp2p/nim-lsquic +[submodule "vendor/nim-jwt"] + path = vendor/nim-jwt + url = https://github.com/vacp2p/nim-jwt.git +[submodule "vendor/nim-ffi"] + path = vendor/nim-ffi + url = https://github.com/logos-messaging/nim-ffi/ ignore = untracked branch = master diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000..4f735f240 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,509 @@ +# AGENTS.md - AI Coding Context + +This file provides essential context for LLMs assisting with Logos Messaging development. 
+ +## Project Identity + +Logos Messaging is designed as a shared public network for generalized messaging, not application-specific infrastructure. + +This project is a Nim implementation of a libp2p protocol suite for private, censorship-resistant P2P messaging. It targets resource-restricted devices and privacy-preserving communication. + +Logos Messaging was formerly known as Waku. Waku-related terminology remains within the codebase for historical reasons. + +### Design Philosophy + +Key architectural decisions: + +Resource-restricted first: Protocols differentiate between full nodes (relay) and light clients (filter, lightpush, store). Light clients can participate without maintaining full message history or relay capabilities. This explains the client/server split in protocol implementations. + +Privacy through unlinkability: RLN (Rate Limiting Nullifier) provides DoS protection while preserving sender anonymity. Messages are routed through pubsub topics with automatic sharding across 8 shards. Code prioritizes metadata privacy alongside content encryption. + +Scalability via sharding: The network uses automatic content-topic-based sharding to distribute traffic. This is why you'll see sharding logic throughout the codebase and why pubsub topic selection is protocol-level, not application-level. + +See [documentation](https://docs.waku.org/learn/) for architectural details. 
+ +### Core Protocols +- Relay: Pub/sub message routing using GossipSub +- Store: Historical message retrieval and persistence +- Filter: Lightweight message filtering for resource-restricted clients +- Lightpush: Lightweight message publishing for clients +- Peer Exchange: Peer discovery mechanism +- RLN Relay: Rate limiting nullifier for spam protection +- Metadata: Cluster and shard metadata exchange between peers +- Mix: Mixnet protocol for enhanced privacy through onion routing +- Rendezvous: Alternative peer discovery mechanism + +### Key Terminology +- ENR (Ethereum Node Record): Node identity and capability advertisement +- Multiaddr: libp2p addressing format (e.g., `/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2...`) +- PubsubTopic: Gossipsub topic for message routing (e.g., `/waku/2/default-waku/proto`) +- ContentTopic: Application-level message categorization (e.g., `/my-app/1/chat/proto`) +- Sharding: Partitioning network traffic across topics (static or auto-sharding) +- RLN (Rate Limiting Nullifier): Zero-knowledge proof system for spam prevention + +### Specifications +All specs are at [rfc.vac.dev/waku](https://rfc.vac.dev/waku). RFCs use `WAKU2-XXX` format (not legacy `WAKU-XXX`). 
+
+## Architecture
+
+### Protocol Module Pattern
+Each protocol typically follows this structure:
+```
+waku_<protocol>/
+├── protocol.nim         # Main protocol type and handler logic
+├── client.nim           # Client-side API
+├── rpc.nim              # RPC message types
+├── rpc_codec.nim        # Protobuf encoding/decoding
+├── common.nim           # Shared types and constants
+└── protocol_metrics.nim # Prometheus metrics
+```
+
+### WakuNode Architecture
+- WakuNode (`waku/node/waku_node.nim`) is the central orchestrator
+- Protocols are "mounted" onto the node's switch (libp2p component)
+- PeerManager handles peer selection and connection management
+- Switch provides libp2p transport, security, and multiplexing
+
+Example protocol type definition:
+```nim
+type WakuFilter* = ref object of LPProtocol
+  subscriptions*: FilterSubscriptions
+  peerManager: PeerManager
+  messageCache: TimedCache[string]
+```
+
+## Development Essentials
+
+### Build Requirements
+- Nim 2.x (check `waku.nimble` for minimum version)
+- Rust toolchain (required for RLN dependencies)
+- Build system: Make with nimbus-build-system
+
+### Build System
+The project uses Makefile with nimbus-build-system (Status's Nim build framework):
+```bash
+# Initial build (updates submodules)
+make wakunode2
+
+# After git pull, update submodules
+make update
+
+# Build with custom flags
+make wakunode2 NIMFLAGS="-d:chronicles_log_level=DEBUG"
+```
+
+Note: The build system uses `--mm:refc` memory management (automatically enforced). Only relevant if compiling outside the standard build system. 
+ +### Common Make Targets +```bash +make wakunode2 # Build main node binary +make test # Run all tests +make testcommon # Run common tests only +make libwakuStatic # Build static C library +make chat2 # Build chat example +make install-nph # Install git hook for auto-formatting +``` + +### Testing +```bash +# Run all tests +make test + +# Run specific test file +make test tests/test_waku_enr.nim + +# Run specific test case from file +make test tests/test_waku_enr.nim "check capabilities support" + +# Build and run test separately (for development iteration) +make test tests/test_waku_enr.nim +``` + +Test structure uses `testutils/unittests`: +```nim +import testutils/unittests + +suite "Waku ENR - Capabilities": + test "check capabilities support": + ## Given + let bitfield: CapabilitiesBitfield = 0b0000_1101u8 + + ## Then + check: + bitfield.supportsCapability(Capabilities.Relay) + not bitfield.supportsCapability(Capabilities.Store) +``` + +### Code Formatting +Mandatory: All code must be formatted with `nph` (vendored in `vendor/nph`) +```bash +# Format specific file +make nph/waku/waku_core.nim + +# Install git pre-commit hook (auto-formats on commit) +make install-nph +``` +The nph formatter handles all formatting details automatically, especially with the pre-commit hook installed. Focus on semantic correctness. 
+ +### Logging +Uses `chronicles` library with compile-time configuration: +```nim +import chronicles + +logScope: + topics = "waku lightpush" + +info "handling request", peerId = peerId, topic = pubsubTopic +error "request failed", error = msg +``` + +Compile with log level: +```bash +nim c -d:chronicles_log_level=TRACE myfile.nim +``` + + +## Code Conventions + +Common pitfalls: +- Always handle Result types explicitly +- Avoid global mutable state: Pass state through parameters +- Keep functions focused: Under 50 lines when possible +- Prefer compile-time checks (`static assert`) over runtime checks + +### Naming +- Files/Directories: `snake_case` (e.g., `waku_lightpush`, `peer_manager`) +- Procedures: `camelCase` (e.g., `handleRequest`, `pushMessage`) +- Types: `PascalCase` (e.g., `WakuFilter`, `PubsubTopic`) +- Constants: `PascalCase` (e.g., `MaxContentTopicsPerRequest`) +- Constructors: `func init(T: type Xxx, params): T` +- For ref types: `func new(T: type Xxx, params): ref T` +- Exceptions: `XxxError` for CatchableError, `XxxDefect` for Defect +- ref object types: `XxxRef` suffix + +### Imports Organization +Group imports: stdlib, external libs, internal modules: +```nim +import + std/[options, sequtils], # stdlib + results, chronicles, chronos, # external + libp2p/peerid +import + ../node/peer_manager, # internal (separate import block) + ../waku_core, + ./common +``` + +### Async Programming +Uses chronos, not stdlib `asyncdispatch`: +```nim +proc handleRequest( + wl: WakuLightPush, peerId: PeerId +): Future[WakuLightPushResult] {.async.} = + let res = await wl.pushHandler(peerId, pubsubTopic, message) + return res +``` + +### Error Handling +The project uses both Result types and exceptions: + +Result types from nim-results are used for protocol and API-level errors: +```nim +proc subscribe( + wf: WakuFilter, peerId: PeerID +): Future[FilterSubscribeResult] {.async.} = + if contentTopics.len > MaxContentTopicsPerRequest: + return 
err(FilterSubscribeError.badRequest("exceeds maximum")) + + # Handle Result with isOkOr + (await wf.subscriptions.addSubscription(peerId, criteria)).isOkOr: + return err(FilterSubscribeError.serviceUnavailable(error)) + + ok() +``` + +Exceptions still used for: +- chronos async failures (CancelledError, etc.) +- Database/system errors +- Library interop + +Most files start with `{.push raises: [].}` to disable exception tracking, then use try/catch blocks where needed. + +### Pragma Usage +```nim +{.push raises: [].} # Disable default exception tracking (at file top) + +proc myProc(): Result[T, E] {.async.} = # Async proc +``` + +### Protocol Inheritance +Protocols inherit from libp2p's `LPProtocol`: +```nim +type WakuLightPush* = ref object of LPProtocol + rng*: ref rand.HmacDrbgContext + peerManager*: PeerManager + pushHandler*: PushMessageHandler +``` + +### Type Visibility +- Public exports use `*` suffix: `type WakuFilter* = ...` +- Fields without `*` are module-private + +## Style Guide Essentials + +This section summarizes key Nim style guidelines relevant to this project. 
Full guide: https://status-im.github.io/nim-style-guide/ + +### Language Features + +Import and Export +- Use explicit import paths with std/ prefix for stdlib +- Group imports: stdlib, external, internal (separate blocks) +- Export modules whose types appear in public API +- Avoid include + +Macros and Templates +- Avoid macros and templates - prefer simple constructs +- Avoid generating public API with macros +- Put logic in templates, use macros only for glue code + +Object Construction +- Prefer Type(field: value) syntax +- Use Type.init(params) convention for constructors +- Default zero-initialization should be valid state +- Avoid using result variable for construction + +ref object Types +- Avoid ref object unless needed for: + - Resource handles requiring reference semantics + - Shared ownership + - Reference-based data structures (trees, lists) + - Stable pointer for FFI +- Use explicit ref MyType where possible +- Name ref object types with Ref suffix: XxxRef + +Memory Management +- Prefer stack-based and statically sized types in core code +- Use heap allocation in glue layers +- Avoid alloca +- For FFI: use create/dealloc or createShared/deallocShared + +Variable Usage +- Use most restrictive of const, let, var (prefer const over let over var) +- Prefer expressions for initialization over var then assignment +- Avoid result variable - use explicit return or expression-based returns + +Functions +- Prefer func over proc +- Avoid public (*) symbols not part of intended API +- Prefer openArray over seq for function parameters + +Methods (runtime polymorphism) +- Avoid method keyword for dynamic dispatch +- Prefer manual vtable with proc closures for polymorphism +- Methods lack support for generics + +Miscellaneous +- Annotate callback proc types with {.raises: [], gcsafe.} +- Avoid explicit {.inline.} pragma +- Avoid converters +- Avoid finalizers + +Type Guidelines + +Binary Data +- Use byte for binary data +- Use seq[byte] for dynamic arrays +- Convert 
string to seq[byte] early if stdlib returns binary as string + +Integers +- Prefer signed (int, int64) for counting, lengths, indexing +- Use unsigned with explicit size (uint8, uint64) for binary data, bit ops +- Avoid Natural +- Check ranges before converting to int +- Avoid casting pointers to int +- Avoid range types + +Strings +- Use string for text +- Use seq[byte] for binary data instead of string + +### Error Handling + +Philosophy +- Prefer Result, Opt for explicit error handling +- Use Exceptions only for legacy code compatibility + +Result Types +- Use Result[T, E] for operations that can fail +- Use cstring for simple error messages: Result[T, cstring] +- Use enum for errors needing differentiation: Result[T, SomeErrorEnum] +- Use Opt[T] for simple optional values +- Annotate all modules: {.push raises: [].} at top + +Exceptions (when unavoidable) +- Inherit from CatchableError, name XxxError +- Use Defect for panics/logic errors, name XxxDefect +- Annotate functions explicitly: {.raises: [SpecificError].} +- Catch specific error types, avoid catching CatchableError +- Use expression-based try blocks +- Isolate legacy exception code with try/except, convert to Result + +Common Defect Sources +- Overflow in signed arithmetic +- Array/seq indexing with [] +- Implicit range type conversions + +Status Codes +- Avoid status code pattern +- Use Result instead + +### Library Usage + +Standard Library +- Use judiciously, prefer focused packages +- Prefer these replacements: + - async: chronos + - bitops: stew/bitops2 + - endians: stew/endians2 + - exceptions: results + - io: stew/io2 + +Results Library +- Use cstring errors for diagnostics without differentiation +- Use enum errors when caller needs to act on specific errors +- Use complex types when additional error context needed +- Use isOkOr pattern for chaining + +Wrappers (C/FFI) +- Prefer native Nim when available +- For C libraries: use {.compile.} to build from source +- Create xxx_abi.nim for raw ABI 
wrapper +- Avoid C++ libraries + +Miscellaneous +- Print hex output in lowercase, accept both cases + +### Common Pitfalls + +- Defects lack tracking by {.raises.} +- nil ref causes runtime crashes +- result variable disables branch checking +- Exception hierarchy unclear between Nim versions +- Range types have compiler bugs +- Finalizers infect all instances of type + +## Common Workflows + +### Adding a New Protocol +1. Create directory: `waku/waku_myprotocol/` +2. Define core files: + - `rpc.nim` - Message types + - `rpc_codec.nim` - Protobuf encoding + - `protocol.nim` - Protocol handler + - `client.nim` - Client API + - `common.nim` - Shared types +3. Define protocol type in `protocol.nim`: + ```nim + type WakuMyProtocol* = ref object of LPProtocol + peerManager: PeerManager + # ... fields + ``` +4. Implement request handler +5. Mount in WakuNode (`waku/node/waku_node.nim`) +6. Add tests in `tests/waku_myprotocol/` +7. Export module via `waku/waku_myprotocol.nim` + +### Adding a REST API Endpoint +1. Define handler in `waku/rest_api/endpoint/myprotocol/` +2. Implement endpoint following pattern: + ```nim + proc installMyProtocolApiHandlers*( + router: var RestRouter, node: WakuNode + ) = + router.api(MethodGet, "/waku/v2/myprotocol/endpoint") do () -> RestApiResponse: + # Implementation + return RestApiResponse.jsonResponse(data, status = Http200) + ``` +3. Register in `waku/rest_api/handlers.nim` + +### Adding Database Migration +For message_store (SQLite): +1. Create `migrations/message_store/NNNNN_description.up.sql` +2. Create corresponding `.down.sql` for rollback +3. Increment version number sequentially +4. 
Test migration locally before committing + +For PostgreSQL: add in `migrations/message_store_postgres/` + +### Running Single Test During Development +```bash +# Build test binary +make test tests/waku_filter_v2/test_waku_client.nim + +# Binary location +./build/tests/waku_filter_v2/test_waku_client.nim.bin + +# Or combine +make test tests/waku_filter_v2/test_waku_client.nim "specific test name" +``` + +### Debugging with Chronicles +Set log level and filter topics: +```bash +nim c -r \ + -d:chronicles_log_level=TRACE \ + -d:chronicles_disabled_topics="eth,dnsdisc" \ + tests/mytest.nim +``` + +## Key Constraints + +### Vendor Directory +- Never edit files directly in vendor - it is auto-generated from git submodules +- Always run `make update` after pulling changes +- Managed by `nimbus-build-system` + +### Chronicles Performance +- Log levels are configured at compile time for performance +- Runtime filtering is available but should be used sparingly: `-d:chronicles_runtime_filtering=on` +- Default sinks are optimized for production + +### Memory Management +- Uses `refc` (reference counting with cycle collection) +- Automatically enforced by the build system (hardcoded in `waku.nimble`) +- Do not override unless absolutely necessary, as it breaks compatibility + +### RLN Dependencies +- RLN code requires a Rust toolchain, which explains Rust imports in some modules +- Pre-built `librln` libraries are checked into the repository + +## Quick Reference + +Language: Nim 2.x | License: MIT or Apache 2.0 + +### Important Files +- `Makefile` - Primary build interface +- `waku.nimble` - Package definition and build tasks (called via nimbus-build-system) +- `vendor/nimbus-build-system/` - Status's build framework +- `waku/node/waku_node.nim` - Core node implementation +- `apps/wakunode2/wakunode2.nim` - Main CLI application +- `waku/factory/waku_conf.nim` - Configuration types +- `library/libwaku.nim` - C bindings entry point + +### Testing Entry Points +- 
`tests/all_tests_waku.nim` - All Waku protocol tests +- `tests/all_tests_wakunode2.nim` - Node application tests +- `tests/all_tests_common.nim` - Common utilities tests + +### Key Dependencies +- `chronos` - Async framework +- `nim-results` - Result type for error handling +- `chronicles` - Logging +- `libp2p` - P2P networking +- `confutils` - CLI argument parsing +- `presto` - REST server +- `nimcrypto` - Cryptographic primitives + +Note: For specific version requirements, check `waku.nimble`. + + diff --git a/CHANGELOG.md b/CHANGELOG.md index edc4a705c..7213337ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,14 @@ ### Bug Fixes - Avoid IndexDefect if DB error message is short ([#3725](https://github.com/logos-messaging/logos-delivery/pull/3725)) + +## v0.37.1-beta (2025-12-10) + +### Bug Fixes + - Remove ENR cache from peer exchange ([#3652](https://github.com/logos-messaging/logos-messaging-nim/pull/3652)) ([7920368a](https://github.com/logos-messaging/logos-messaging-nim/commit/7920368a36687cd5f12afa52d59866792d8457ca)) -## v0.37.0 (2025-10-01) +## v0.37.0-beta (2025-10-01) ### Notes diff --git a/Dockerfile b/Dockerfile index 90fb0a9c9..5b16b9eee 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ ARG LOG_LEVEL=TRACE ARG HEAPTRACK_BUILD=0 # Get build tools and required header files -RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq +RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq libbsd-dev WORKDIR /app COPY . . 
@@ -46,7 +46,7 @@ LABEL version="unknown" EXPOSE 30303 60000 8545 # Referenced in the binary -RUN apk add --no-cache libgcc libpq-dev bind-tools +RUN apk add --no-cache libgcc libpq-dev bind-tools libstdc++ # Copy to separate location to accomodate different MAKE_TARGET values COPY --from=nim-build /app/build/$MAKE_TARGET /usr/local/bin/ diff --git a/Dockerfile.lightpushWithMix.compile b/Dockerfile.lightpushWithMix.compile index 381ee60ef..82e076b41 100644 --- a/Dockerfile.lightpushWithMix.compile +++ b/Dockerfile.lightpushWithMix.compile @@ -1,5 +1,5 @@ # BUILD NIM APP ---------------------------------------------------------------- -FROM rust:1.81.0-alpine3.19 AS nim-build +FROM rustlang/rust:nightly-alpine3.19 AS nim-build ARG NIMFLAGS ARG MAKE_TARGET=lightpushwithmix @@ -7,7 +7,7 @@ ARG NIM_COMMIT ARG LOG_LEVEL=TRACE # Get build tools and required header files -RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq +RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq libbsd-dev WORKDIR /app COPY . . 
@@ -24,7 +24,6 @@ RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT} # Build the final node binary RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}" - # REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES---------------------------------------- FROM alpine:3.18 AS base_lpt @@ -44,8 +43,8 @@ RUN apk add --no-cache libgcc libpq-dev \ wget \ iproute2 \ python3 \ - jq - + jq \ + libstdc++ COPY --from=nim-build /app/build/lightpush_publisher_mix /usr/bin/ RUN chmod +x /usr/bin/lightpush_publisher_mix diff --git a/LICENSE-APACHEv2 b/LICENSE-APACHE similarity index 98% rename from LICENSE-APACHEv2 rename to LICENSE-APACHE index 7b6a3cb27..d64569567 100644 --- a/LICENSE-APACHEv2 +++ b/LICENSE-APACHE @@ -1,6 +1,3 @@ -nim-waku is licensed under the Apache License version 2 -Copyright (c) 2018 Status Research & Development GmbH ------------------------------------------------------ Apache License Version 2.0, January 2004 @@ -190,7 +187,7 @@ Copyright (c) 2018 Status Research & Development GmbH same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2018 Status Research & Development GmbH + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/LICENSE-MIT b/LICENSE-MIT index aab8020f0..d4c697062 100644 --- a/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,25 +1,21 @@ -nim-waku is licensed under the MIT License -Copyright (c) 2018 Status Research & Development GmbH ------------------------------------------------------ - The MIT License (MIT) -Copyright (c) 2018 Status Research & Development GmbH +Copyright © 2025-2026 Logos Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal +of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/Makefile b/Makefile index 37341792c..8f98e90bd 100644 --- a/Makefile +++ b/Makefile @@ -43,15 +43,21 @@ ifeq ($(detected_OS),Windows) LIBS = -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)") + NIM_PARAMS += --passL:"-Wl,--allow-multiple-definition" + + export PATH := /c/msys64/usr/bin:/c/msys64/mingw64/bin:/c/msys64/usr/lib:/c/msys64/mingw64/lib:$(PATH) + endif ########## ## Main ## ########## -.PHONY: all test update clean +.PHONY: all test update clean examples # default target, because it's the first one that doesn't start with '.' -all: | wakunode2 example2 chat2 chat2bridge libwaku +all: | wakunode2 libwaku + +examples: | example2 chat2 chat2bridge test_file := $(word 2,$(MAKECMDGOALS)) define test_name @@ -116,6 +122,10 @@ endif ################## .PHONY: deps libbacktrace +FOUNDRY_VERSION := 1.5.0 +PNPM_VERSION := 10.23.0 + + rustup: ifeq (, $(shell which cargo)) # Install Rustup if it's not installed @@ -125,7 +135,7 @@ ifeq (, $(shell which cargo)) endif rln-deps: rustup - ./scripts/install_rln_tests_dependencies.sh + ./scripts/install_rln_tests_dependencies.sh $(FOUNDRY_VERSION) $(PNPM_VERSION) deps: | deps-common nat-libs waku.nims @@ -143,6 +153,9 @@ ifeq ($(USE_LIBBACKTRACE), 0) NIM_PARAMS := $(NIM_PARAMS) -d:disable_libbacktrace endif +# enable experimental exit is dest feature in libp2p mix +NIM_PARAMS := $(NIM_PARAMS) -d:libp2p_mix_experimental_exit_is_dest + libbacktrace: + $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 @@ -180,9 +193,9 @@ LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit LIBRLN_VERSION := v0.9.0 ifeq ($(detected_OS),Windows) -LIBRLN_FILE := rln.lib +LIBRLN_FILE ?= rln.lib else -LIBRLN_FILE := librln_$(LIBRLN_VERSION).a +LIBRLN_FILE ?= librln_$(LIBRLN_VERSION).a endif $(LIBRLN_FILE): @@ -260,6 +273,10 @@ lightpushwithmix: | build deps librln echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim 
lightpushwithmix $(NIM_PARAMS) waku.nims +api_example: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim api_example $(NIM_PARAMS) waku.nims + build/%: | build deps librln echo -e $(BUILD_MSG) "build/$*" && \ $(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $* @@ -417,20 +434,62 @@ docker-liteprotocoltester-push: ################ ## C Bindings ## ################ -.PHONY: cbindings cwaku_example libwaku +.PHONY: cbindings cwaku_example libwaku liblogosdelivery liblogosdelivery_example STATIC ?= 0 +LIBWAKU_BUILD_COMMAND ?= libwakuDynamic +LIBLOGOSDELIVERY_BUILD_COMMAND ?= liblogosdeliveryDynamic +ifeq ($(detected_OS),Windows) + LIB_EXT_DYNAMIC = dll + LIB_EXT_STATIC = lib +else ifeq ($(detected_OS),Darwin) + LIB_EXT_DYNAMIC = dylib + LIB_EXT_STATIC = a +else ifeq ($(detected_OS),Linux) + LIB_EXT_DYNAMIC = so + LIB_EXT_STATIC = a +endif + +LIB_EXT := $(LIB_EXT_DYNAMIC) +ifeq ($(STATIC), 1) + LIB_EXT = $(LIB_EXT_STATIC) + LIBWAKU_BUILD_COMMAND = libwakuStatic + LIBLOGOSDELIVERY_BUILD_COMMAND = liblogosdeliveryStatic +endif libwaku: | build deps librln - rm -f build/libwaku* + echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBWAKU_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT) -ifeq ($(STATIC), 1) - echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims +liblogosdelivery: | build deps librln + echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBLOGOSDELIVERY_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT) + +logosdelivery_example: | build liblogosdelivery + @echo -e $(BUILD_MSG) "build/$@" +ifeq ($(detected_OS),Darwin) + gcc -o build/$@ \ + liblogosdelivery/examples/logosdelivery_example.c \ + liblogosdelivery/examples/json_utils.c \ + -I./liblogosdelivery \ + -L./build \ + -llogosdelivery \ + -Wl,-rpath,./build +else ifeq ($(detected_OS),Linux) + gcc -o build/$@ \ + liblogosdelivery/examples/logosdelivery_example.c \ + 
liblogosdelivery/examples/json_utils.c \ + -I./liblogosdelivery \ + -L./build \ + -llogosdelivery \ + -Wl,-rpath,'$$ORIGIN' else ifeq ($(detected_OS),Windows) - echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims -else - echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims + gcc -o build/$@.exe \ + liblogosdelivery/examples/logosdelivery_example.c \ + liblogosdelivery/examples/json_utils.c \ + -I./liblogosdelivery \ + -L./build \ + -llogosdelivery \ + -lws2_32 endif ##################### @@ -460,8 +519,13 @@ ifndef ANDROID_NDK_HOME endif build-libwaku-for-android-arch: - $(MAKE) rebuild-nat-libs CC=$(ANDROID_TOOLCHAIN_DIR)/bin/$(ANDROID_COMPILER) && \ - ./scripts/build_rln_android.sh $(CURDIR)/build $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(CROSS_TARGET) $(ABIDIR) && \ +ifneq ($(findstring /nix/store,$(LIBRLN_FILE)),) + mkdir -p $(CURDIR)/build/android/$(ABIDIR)/ + cp $(LIBRLN_FILE) $(CURDIR)/build/android/$(ABIDIR)/ +else + ./scripts/build_rln_android.sh $(CURDIR)/build $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(CROSS_TARGET) $(ABIDIR) +endif + $(MAKE) rebuild-nat-libs CC=$(ANDROID_TOOLCHAIN_DIR)/bin/$(ANDROID_COMPILER) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_ARCH=$(ANDROID_ARCH) ANDROID_COMPILER=$(ANDROID_COMPILER) ANDROID_TOOLCHAIN_DIR=$(ANDROID_TOOLCHAIN_DIR) $(ENV_SCRIPT) nim libWakuAndroid $(NIM_PARAMS) waku.nims libwaku-android-arm64: ANDROID_ARCH=aarch64-linux-android @@ -498,6 +562,51 @@ libwaku-android: # It's likely this architecture is not used so we might just not support it. 
# $(MAKE) libwaku-android-arm +################# +## iOS Bindings # +################# +.PHONY: libwaku-ios-precheck \ + libwaku-ios-device \ + libwaku-ios-simulator \ + libwaku-ios + +IOS_DEPLOYMENT_TARGET ?= 18.0 + +# Get SDK paths dynamically using xcrun +define get_ios_sdk_path +$(shell xcrun --sdk $(1) --show-sdk-path 2>/dev/null) +endef + +libwaku-ios-precheck: +ifeq ($(detected_OS),Darwin) + @command -v xcrun >/dev/null 2>&1 || { echo "Error: Xcode command line tools not installed"; exit 1; } +else + $(error iOS builds are only supported on macOS) +endif + +# Build for iOS architecture +build-libwaku-for-ios-arch: + IOS_SDK=$(IOS_SDK) IOS_ARCH=$(IOS_ARCH) IOS_SDK_PATH=$(IOS_SDK_PATH) $(ENV_SCRIPT) nim libWakuIOS $(NIM_PARAMS) waku.nims + +# iOS device (arm64) +libwaku-ios-device: IOS_ARCH=arm64 +libwaku-ios-device: IOS_SDK=iphoneos +libwaku-ios-device: IOS_SDK_PATH=$(call get_ios_sdk_path,iphoneos) +libwaku-ios-device: | libwaku-ios-precheck build deps + $(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH) + +# iOS simulator (arm64 - Apple Silicon Macs) +libwaku-ios-simulator: IOS_ARCH=arm64 +libwaku-ios-simulator: IOS_SDK=iphonesimulator +libwaku-ios-simulator: IOS_SDK_PATH=$(call get_ios_sdk_path,iphonesimulator) +libwaku-ios-simulator: | libwaku-ios-precheck build deps + $(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH) + +# Build all iOS targets +libwaku-ios: + $(MAKE) libwaku-ios-device + $(MAKE) libwaku-ios-simulator + cwaku_example: | build libwaku echo -e $(BUILD_MSG) "build/$@" && \ cc -o "build/$@" \ @@ -543,4 +652,3 @@ release-notes: sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' # I could not get the tool to replace issue ids with links, so using sed for now, # asked here: https://github.com/bvieira/sv4git/discussions/101 - diff --git a/README.md b/README.md index ce352d6f5..8833ae131 100644 --- a/README.md +++ 
b/README.md @@ -1,19 +1,21 @@ -# Nwaku +# Logos Messaging Nim ## Introduction -The nwaku repository implements Waku, and provides tools related to it. +This repository implements a set of libp2p protocols aimed to bring +private communications. -- A Nim implementation of the [Waku (v2) protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html). -- CLI application `wakunode2` that allows you to run a Waku node. -- Examples of Waku usage. +- Nim implementation of [these specs](https://github.com/vacp2p/rfc-index/tree/main/waku). +- C library that exposes the implemented protocols. +- CLI application that allows you to run an lmn node. +- Examples. - Various tests of above. For more details see the [source code](waku/README.md) ## How to Build & Run ( Linux, MacOS & WSL ) -These instructions are generic. For more detailed instructions, see the Waku source code above. +These instructions are generic. For more detailed instructions, see the source code above. ### Prerequisites diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index e2a46ca1b..71d8a4e6a 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -480,7 +480,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = if conf.lightpushnode != "": let peerInfo = parsePeerInfo(conf.lightpushnode) if peerInfo.isOk(): - await mountLegacyLightPush(node) + (await node.mountLegacyLightPush()).isOkOr: + error "failed to mount legacy lightpush", error = error + quit(QuitFailure) node.mountLegacyLightPushClient() node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec) else: diff --git a/apps/chat2mix/chat2mix.nim b/apps/chat2mix/chat2mix.nim index 5979e2936..558454307 100644 --- a/apps/chat2mix/chat2mix.nim +++ b/apps/chat2mix/chat2mix.nim @@ -30,6 +30,7 @@ import protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs nameresolving/dnsresolver, protocols/mix/curve25519, + protocols/mix/mix_protocol, ] # define DNS resolution import waku/[ @@ -38,6 
+39,7 @@ import waku_lightpush/rpc, waku_enr, discovery/waku_dnsdisc, + discovery/waku_kademlia, waku_node, node/waku_metrics, node/peer_manager, @@ -82,6 +84,8 @@ type PrivateKey* = crypto.PrivateKey Topic* = waku_core.PubsubTopic +const MinMixNodePoolSize = 4 + ##################### ## chat2 protobufs ## ##################### @@ -124,7 +128,7 @@ proc encode*(message: Chat2Message): ProtoBuffer = return serialised -proc toString*(message: Chat2Message): string = +proc `$`*(message: Chat2Message): string = # Get message date and timestamp in local time let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'") @@ -331,13 +335,14 @@ proc maintainSubscription( const maxFailedServiceNodeSwitches = 10 var noFailedSubscribes = 0 var noFailedServiceNodeSwitches = 0 - const RetryWaitMs = 2.seconds # Quick retry interval - const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval + # Use chronos.Duration explicitly to avoid mismatch with std/times.Duration + let RetryWait = chronos.seconds(2) # Quick retry interval + let SubscriptionMaintenance = chronos.seconds(30) # Subscription maintenance interval while true: info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer) # First use filter-ping to check if we have an active subscription let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr: - await sleepAsync(SubscriptionMaintenanceMs) + await sleepAsync(SubscriptionMaintenance) info "subscription is live." continue @@ -350,7 +355,7 @@ proc maintainSubscription( some(filterPubsubTopic), filterContentTopic, actualFilterPeer ) ).errorOr: - await sleepAsync(SubscriptionMaintenanceMs) + await sleepAsync(SubscriptionMaintenance) if noFailedSubscribes > 0: noFailedSubscribes -= 1 notice "subscribe request successful." 
@@ -365,7 +370,7 @@ proc maintainSubscription( # wakunode.peerManager.peerStore.delete(actualFilterPeer) if noFailedSubscribes < maxFailedSubscribes: - await sleepAsync(RetryWaitMs) # Wait a bit before retrying + await sleepAsync(RetryWait) # Wait a bit before retrying elif not preventPeerSwitch: # try again with new peer without delay let actualFilterPeer = selectRandomServicePeer( @@ -380,7 +385,7 @@ proc maintainSubscription( noFailedSubscribes = 0 else: - await sleepAsync(SubscriptionMaintenanceMs) + await sleepAsync(SubscriptionMaintenance) {.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError @@ -450,12 +455,48 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = (await node.mountMix(conf.clusterId, mixPrivKey, conf.mixnodes)).isOkOr: error "failed to mount waku mix protocol: ", error = $error quit(QuitFailure) + + # Setup extended kademlia discovery if bootstrap nodes are provided + if conf.kadBootstrapNodes.len > 0: + var kadBootstrapPeers: seq[(PeerId, seq[MultiAddress])] + for nodeStr in conf.kadBootstrapNodes: + let (peerId, ma) = parseFullAddress(nodeStr).valueOr: + error "Failed to parse kademlia bootstrap node", node = nodeStr, error = error + continue + kadBootstrapPeers.add((peerId, @[ma])) + + if kadBootstrapPeers.len > 0: + node.wakuKademlia = WakuKademlia.new( + node.switch, + ExtendedKademliaDiscoveryParams( + bootstrapNodes: kadBootstrapPeers, + mixPubKey: some(mixPubKey), + advertiseMix: false, + ), + node.peerManager, + getMixNodePoolSize = proc(): int {.gcsafe, raises: [].} = + if node.wakuMix.isNil(): + 0 + else: + node.getMixNodePoolSize(), + isNodeStarted = proc(): bool {.gcsafe, raises: [].} = + node.started, + ).valueOr: + error "failed to setup kademlia discovery", error = error + quit(QuitFailure) + + #await node.mountRendezvousClient(conf.clusterId) + await node.start() node.peerManager.start() + if not node.wakuKademlia.isNil(): + (await node.wakuKademlia.start(minMixPeers = 
MinMixNodePoolSize)).isOkOr: + error "failed to start kademlia discovery", error = error + quit(QuitFailure) await node.mountLibp2pPing() - await node.mountPeerExchangeClient() + #await node.mountPeerExchangeClient() let pubsubTopic = conf.getPubsubTopic(node, conf.contentTopic) echo "pubsub topic is: " & pubsubTopic let nick = await readNick(transp) @@ -587,22 +628,17 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = error "Couldn't find any service peer" quit(QuitFailure) - #await mountLegacyLightPush(node) node.peerManager.addServicePeer(servicePeerInfo, WakuLightpushCodec) node.peerManager.addServicePeer(servicePeerInfo, WakuPeerExchangeCodec) + #node.peerManager.addServicePeer(servicePeerInfo, WakuRendezVousCodec) # Start maintaining subscription asyncSpawn maintainSubscription( node, pubsubTopic, conf.contentTopic, servicePeerInfo, false ) echo "waiting for mix nodes to be discovered..." - while true: - if node.getMixNodePoolSize() >= 3: - break - discard await node.fetchPeerExchangePeers() - await sleepAsync(1000) - while node.getMixNodePoolSize() < 3: + while node.getMixNodePoolSize() < MinMixNodePoolSize: info "waiting for mix nodes to be discovered", currentpoolSize = node.getMixNodePoolSize() await sleepAsync(1000) diff --git a/apps/chat2mix/config_chat2mix.nim b/apps/chat2mix/config_chat2mix.nim index ddb7136cb..46cd481d7 100644 --- a/apps/chat2mix/config_chat2mix.nim +++ b/apps/chat2mix/config_chat2mix.nim @@ -203,13 +203,13 @@ type fleet* {. desc: "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.", - defaultValue: Fleet.test, + defaultValue: Fleet.none, name: "fleet" .}: Fleet contentTopic* {. 
desc: "Content topic for chat messages.", - defaultValue: "/toy-chat-mix/2/huilong/proto", + defaultValue: "/toy-chat/2/baixa-chiado/proto", name: "content-topic" .}: string @@ -228,7 +228,14 @@ type desc: "WebSocket Secure Support.", defaultValue: false, name: "websocket-secure-support" - .}: bool ## rln-relay configuration + .}: bool + + ## Kademlia Discovery config + kadBootstrapNodes* {. + desc: + "Peer multiaddr for kademlia discovery bootstrap node (must include /p2p/). Argument may be repeated.", + name: "kad-bootstrap-node" + .}: seq[string] proc parseCmdArg*(T: type MixNodePubInfo, p: string): T = let elements = p.split(":") diff --git a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile index 9e2432051..dd7018cc0 100644 --- a/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile +++ b/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile @@ -7,7 +7,7 @@ ARG NIM_COMMIT ARG LOG_LEVEL=TRACE # Get build tools and required header files -RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq +RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq libbsd-dev WORKDIR /app COPY . . 
@@ -43,7 +43,8 @@ EXPOSE 30303 60000 8545 RUN apk add --no-cache libgcc libpq-dev \ wget \ iproute2 \ - python3 + python3 \ + libstdc++ COPY --from=nim-build /app/build/liteprotocoltester /usr/bin/ RUN chmod +x /usr/bin/liteprotocoltester diff --git a/apps/liteprotocoltester/liteprotocoltester.nim b/apps/liteprotocoltester/liteprotocoltester.nim index adb1b0f8a..46c85e910 100644 --- a/apps/liteprotocoltester/liteprotocoltester.nim +++ b/apps/liteprotocoltester/liteprotocoltester.nim @@ -130,7 +130,8 @@ when isMainModule: info "Setting up shutdown hooks" proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = - await waku.stop() + (await waku.stop()).isOkOr: + error "Waku shutdown failed", error = error quit(QuitSuccess) # Handle Ctrl-C SIGINT @@ -160,7 +161,8 @@ when isMainModule: # Not available in -d:release mode writeStackTrace() - waitFor waku.stop() + (waitFor waku.stop()).isOkOr: + error "Waku shutdown failed", error = error quit(QuitFailure) c_signal(ansi_c.SIGSEGV, handleSigsegv) diff --git a/apps/wakucanary/wakucanary.nim b/apps/wakucanary/wakucanary.nim index bcff9653e..40bf4db45 100644 --- a/apps/wakucanary/wakucanary.nim +++ b/apps/wakucanary/wakucanary.nim @@ -143,16 +143,18 @@ proc areProtocolsSupported( proc pingNode( node: WakuNode, peerInfo: RemotePeerInfo -): Future[void] {.async, gcsafe.} = +): Future[bool] {.async, gcsafe.} = try: let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) let pingDelay = await node.libp2pPing.ping(conn) info "Peer response time (ms)", peerId = peerInfo.peerId, ping = pingDelay.millis + return true except CatchableError: var msg = getCurrentExceptionMsg() if msg == "Future operation cancelled!": msg = "timedout" error "Failed to ping the peer", peer = peerInfo, err = msg + return false proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let conf: WakuCanaryConf = WakuCanaryConf.load() @@ -268,8 +270,17 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} = let 
lp2pPeerStore = node.switch.peerStore let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId] + var pingSuccess = true if conf.ping: - discard await pingFut + try: + pingSuccess = await pingFut + except CatchableError as exc: + pingSuccess = false + error "Ping operation failed or timed out", error = exc.msg + + if not pingSuccess: + error "Ping to the node failed", peerId = peer.peerId, conStatus = $conStatus + quit(QuitFailure) if conStatus in [Connected, CanConnect]: let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId] diff --git a/apps/wakunode2/wakunode2.nim b/apps/wakunode2/wakunode2.nim index b50c7113b..c8132ff4e 100644 --- a/apps/wakunode2/wakunode2.nim +++ b/apps/wakunode2/wakunode2.nim @@ -62,7 +62,8 @@ when isMainModule: info "Setting up shutdown hooks" proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = - await waku.stop() + (await waku.stop()).isOkOr: + error "Waku shutdown failed", error = error quit(QuitSuccess) # Handle Ctrl-C SIGINT @@ -92,7 +93,8 @@ when isMainModule: # Not available in -d:release mode writeStackTrace() - waitFor waku.stop() + (waitFor waku.stop()).isOkOr: + error "Waku shutdown failed", error = error quit(QuitFailure) c_signal(ansi_c.SIGSEGV, handleSigsegv) diff --git a/docs/api/rest-api.md b/docs/api/rest-api.md index eeb90abfb..cc8e51020 100644 --- a/docs/api/rest-api.md +++ b/docs/api/rest-api.md @@ -38,6 +38,9 @@ A particular OpenAPI spec can be easily imported into [Postman](https://www.post curl http://localhost:8645/debug/v1/info -s | jq ``` +### Store API + +The `page_size` flag in the Store API has a default value of 20 and a max value of 100. 
### Node configuration Find details [here](https://github.com/waku-org/nwaku/tree/master/docs/operators/how-to/configure-rest-api.md) diff --git a/docs/contributors/release-process.md b/docs/contributors/release-process.md index c0fb12d1c..8aa9282cd 100644 --- a/docs/contributors/release-process.md +++ b/docs/contributors/release-process.md @@ -6,44 +6,52 @@ For more context, see https://trunkbaseddevelopment.com/branch-for-release/ ## How to do releases -### Before release +### Prerequisites + +- All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) have been closed or, after consultation, deferred to the next release. +- All submodules are up to date. + > Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). That PR process must be done and merged a couple of days before the release. -Ensure all items in this list are ticked: -- [ ] All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) has been closed or, after consultation, deferred to a next release. -- [ ] All submodules are up to date. - > **IMPORTANT:** Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). That PR process must be done and merged a couple of days before the release. > In case the submodules update has a low effort and/or risk for the release, follow the ["Update submodules"](./git-submodules.md) instructions. - > If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate. -- [ ] The [js-waku CI tests](https://github.com/waku-org/js-waku/actions/workflows/ci.yml) pass against the release candidate (i.e. nwaku latest `master`). - > **NOTE:** This serves as a basic regression test against typical clients of nwaku. 
- > The specific job that needs to pass is named `node_with_nwaku_master`. -### Performing the release + > If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate. + +### Release types + +- **Full release**: follow the entire [Release process](#release-process--step-by-step). + +- **Beta release**: skip just `6c` and `6d` steps from [Release process](#release-process--step-by-step). + +- Choose the appropriate release process based on the release type: + - [Full Release](../../.github/ISSUE_TEMPLATE/prepare_full_release.md) + - [Beta Release](../../.github/ISSUE_TEMPLATE/prepare_beta_release.md) + +### Release process ( step by step ) 1. Checkout a release branch from master ``` - git checkout -b release/v0.1.0 + git checkout -b release/v0.X.0 ``` -1. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update. +2. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update. ``` make release-notes ``` -1. Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it +3. Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it ``` - git tag -as v0.1.0-rc.0 -m "Initial release." - git push origin v0.1.0-rc.0 + git tag -as v0.X.0-rc.0 -m "Initial release." + git push origin v0.X.0-rc.0 ``` - This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a Github release + This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a GitHub release -1. 
Open a PR from the release branch for others to review the included changes and the release-notes
+4. Open a PR from the release branch for others to review the included changes and the release-notes
 
-1. In case additional changes are needed, create a new RC tag
+5. In case additional changes are needed, create a new RC tag
 
    Make sure the new tag is associated with CHANGELOG update.
 
@@ -52,25 +60,63 @@ Ensure all items in this list are ticked:
   # Make changes, rebase and create new tag
   # Squash to one commit and make a nice commit message
   git rebase -i origin/master
-  git tag -as v0.1.0-rc.1 -m "Initial release."
-  git push origin v0.1.0-rc.1
+  git tag -as v0.X.0-rc.1 -m "Initial release."
+  git push origin v0.X.0-rc.1
   ```
-1. Validate the release. For the release validation process, please refer to the following [guide](https://www.notion.so/Release-Process-61234f335b904cd0943a5033ed8f42b4#47af557e7f9744c68fdbe5240bf93ca9)
+   Similarly use v0.X.0-rc.2, v0.X.0-rc.3, etc. for additional RC tags.
 
-1. Once the release-candidate has been validated, create a final release tag and push it.
-We also need to merge release branch back to master as a final step.
+6. **Validation of release candidate**
+
+   6a. **Automated testing**
+      - Ensure all the unit tests (specifically js-waku tests) are green against the release candidate.
+
+   6b. **Waku fleet testing**
+      - Start the `waku.test` [Deployment job](https://ci.infra.status.im/job/nim-waku/) and wait for it to complete. If it fails, debug it.
+      - After completion, disable the fleet so that the daily CI does not override your release candidate.
+      - Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image.
+      - Check if the image is created at [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
+      - Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
+        - Set the time range to "Last 30 days" (or since the last release).
+        - Most relevant search queries: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`.
+        - Document any crashes or errors found.
+      - If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [Deployment job](https://ci.infra.status.im/job/nim-waku/).
+      - Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most probably, if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`.
+      - Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit.
+
+   6c. **QA and DST testing**
+      - Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams.
+
+   > We need an additional report like [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) specifically from the DST team. Inform the DST team about the expectations for this RC. For example, if we expect higher or lower bandwidth consumption.
+
+   6d. **Status fleet testing**
+      - Deploy the release candidate to `status.staging`.
+      - Perform a [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log the results as comments in this issue.
+      - Connect 2 instances to the `status.staging` fleet, one in relay mode, the other one in light client mode.
+ - 1:1 Chats with each other + - Send and receive messages in a community + - Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store + - Perform checks based on _end-user impact_. + - Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app) (not a blocking point). + - Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested. + - Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`. + - Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC. + - **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities. + +7. Once the release-candidate has been validated, create a final release tag and push it. +We also need to merge the release branch back into master as a final step. ``` - git checkout release/v0.1.0 - git tag -as v0.1.0 -m "Initial release." - git push origin v0.1.0 + git checkout release/v0.X.0 + git tag -as v0.X.0 -m "final release." (use v0.X.0-beta as the tag if you are creating a beta release) + git push origin v0.X.0 git switch master git pull - git merge release/v0.1.0 + git merge release/v0.X.0 ``` +8. Update `waku-rust-bindings`, `waku-simulator` and `nwaku-compose` to use the new release. -1. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag. +9. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag. * Add binaries produced by the ["Upload Release Asset"](https://github.com/waku-org/nwaku/actions/workflows/release-assets.yml) workflow. Where possible, test the binaries before uploading to the release. 
@@ -80,22 +126,10 @@ We also need to merge release branch back to master as a final step. 2. Deploy the release image to [Dockerhub](https://hub.docker.com/r/wakuorg/nwaku) by triggering [the manual Jenkins deployment job](https://ci.infra.status.im/job/nim-waku/job/docker-manual/). > Ensure the following build parameters are set: > - `MAKE_TARGET`: `wakunode2` - > - `IMAGE_TAG`: the release tag (e.g. `v0.16.0`) + > - `IMAGE_TAG`: the release tag (e.g. `v0.38.0`) > - `IMAGE_NAME`: `wakuorg/nwaku` > - `NIMFLAGS`: `--colors:off -d:disableMarchNative -d:chronicles_colors:none -d:postgres` - > - `GIT_REF` the release tag (e.g. `v0.16.0`) -3. Update the default nwaku image in [nwaku-compose](https://github.com/waku-org/nwaku-compose/blob/master/docker-compose.yml) -4. Deploy the release to appropriate fleets: - - Inform clients - > **NOTE:** known clients are currently using some version of js-waku, go-waku, nwaku or waku-rs. - > Clients are reachable via the corresponding channels on the Vac Discord server. - > It should be enough to inform clients on the `#nwaku` and `#announce` channels on Discord. - > Informal conversations with specific repo maintainers are often part of this process. - - Check if nwaku configuration parameters changed. If so [update fleet configuration](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) in [infra-nim-waku](https://github.com/status-im/infra-nim-waku) - - Deploy release to the `waku.sandbox` fleet from [Jenkins](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/). - - Ensure that nodes successfully start up and monitor health using [Grafana](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2?orgId=1) and [Kibana](https://kibana.infra.status.im/goto/a7728e70-eb26-11ec-81d1-210eb3022c76). - - If necessary, revert by deploying the previous release. Download logs and open a bug report issue. -5. Submit a PR to merge the release branch back to `master`. 
Make sure you use the option `Merge pull request (Create a merge commit)` to perform such merge. + > - `GIT_REF` the release tag (e.g. `v0.38.0`) ### Performing a patch release @@ -116,4 +150,15 @@ We also need to merge release branch back to master as a final step. 4. Once the release-candidate has been validated and changelog PR got merged, cherry-pick the changelog update from master to the release branch. Create a final release tag and push it. -5. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual. +5. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual. + +### Links + +- [Release process](https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md) +- [Release notes](https://github.com/waku-org/nwaku/blob/master/CHANGELOG.md) +- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) +- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) +- [Jenkins](https://ci.infra.status.im/job/nim-waku/) +- [Fleets](https://fleets.waku.org/) +- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab) +- [Kibana](https://kibana.infra.status.im/app/) \ No newline at end of file diff --git a/docs/operators/how-to/configure-rest-api.md b/docs/operators/how-to/configure-rest-api.md index 3fe070aab..7a58a798c 100644 --- a/docs/operators/how-to/configure-rest-api.md +++ b/docs/operators/how-to/configure-rest-api.md @@ -1,4 +1,3 @@ - # Configure a REST API node A subset of the node configuration can be used to modify the behaviour of the HTTP REST API. @@ -21,3 +20,5 @@ Example: ```shell wakunode2 --rest=true ``` + +The `page_size` flag in the Store API has a default value of 20 and a max value of 100. 
diff --git a/examples/api_example/api_example.nim b/examples/api_example/api_example.nim new file mode 100644 index 000000000..4a7cde5db --- /dev/null +++ b/examples/api_example/api_example.nim @@ -0,0 +1,94 @@ +import std/options +import chronos, results, confutils, confutils/defs +import waku + +type CliArgs = object + ethRpcEndpoint* {. + defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled" + .}: string + +proc periodicSender(w: Waku): Future[void] {.async.} = + let sentListener = MessageSentEvent.listen( + proc(event: MessageSentEvent) {.async: (raises: []).} = + echo "Message sent with request ID: ", + event.requestId, " hash: ", event.messageHash + ).valueOr: + echo "Failed to listen to message sent event: ", error + return + + let errorListener = MessageErrorEvent.listen( + proc(event: MessageErrorEvent) {.async: (raises: []).} = + echo "Message failed to send with request ID: ", + event.requestId, " error: ", event.error + ).valueOr: + echo "Failed to listen to message error event: ", error + return + + let propagatedListener = MessagePropagatedEvent.listen( + proc(event: MessagePropagatedEvent) {.async: (raises: []).} = + echo "Message propagated with request ID: ", + event.requestId, " hash: ", event.messageHash + ).valueOr: + echo "Failed to listen to message propagated event: ", error + return + + defer: + MessageSentEvent.dropListener(sentListener) + MessageErrorEvent.dropListener(errorListener) + MessagePropagatedEvent.dropListener(propagatedListener) + + ## Periodically sends a Waku message every 30 seconds + var counter = 0 + while true: + let envelope = MessageEnvelope.init( + contentTopic = "example/content/topic", + payload = "Hello Waku! 
Message number: " & $counter, + ) + + let sendRequestId = (await w.send(envelope)).valueOr: + echo "Failed to send message: ", error + quit(QuitFailure) + + echo "Sending message with request ID: ", sendRequestId, " counter: ", counter + + counter += 1 + await sleepAsync(30.seconds) + +when isMainModule: + let args = CliArgs.load() + + echo "Starting Waku node..." + + # Use WakuNodeConf (the CLI configuration type) for node setup + var conf = defaultWakuNodeConf().valueOr: + echo "Failed to create default config: ", error + quit(QuitFailure) + + if args.ethRpcEndpoint == "": + # Create a basic configuration for the Waku node + # No RLN as we don't have an ETH RPC Endpoint + conf.mode = Core + conf.preset = "logos.dev" + else: + # Connect to TWN, use ETH RPC Endpoint for RLN + conf.mode = Core + conf.preset = "twn" + conf.ethClientUrls = @[EthRpcUrl(args.ethRpcEndpoint)] + + # Create the node using the library API's createNode function + let node = (waitFor createNode(conf)).valueOr: + echo "Failed to create node: ", error + quit(QuitFailure) + + echo("Waku node created successfully!") + + # Start the node + (waitFor startWaku(addr node)).isOkOr: + echo "Failed to start node: ", error + quit(QuitFailure) + + echo "Node started successfully!" 
+ + asyncSpawn periodicSender(node) + + runForever() diff --git a/examples/cbindings/waku_example.c b/examples/cbindings/waku_example.c index 35ac8a2e2..f337203ae 100644 --- a/examples/cbindings/waku_example.c +++ b/examples/cbindings/waku_example.c @@ -19,283 +19,309 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; int callback_executed = 0; -void waitForCallback() { - pthread_mutex_lock(&mutex); - while (!callback_executed) { - pthread_cond_wait(&cond, &mutex); - } - callback_executed = 0; - pthread_mutex_unlock(&mutex); +void waitForCallback() +{ + pthread_mutex_lock(&mutex); + while (!callback_executed) + { + pthread_cond_wait(&cond, &mutex); + } + callback_executed = 0; + pthread_mutex_unlock(&mutex); } -#define WAKU_CALL(call) \ -do { \ - int ret = call; \ - if (ret != 0) { \ - printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \ - exit(1); \ - } \ - waitForCallback(); \ -} while (0) +#define WAKU_CALL(call) \ + do \ + { \ + int ret = call; \ + if (ret != 0) \ + { \ + printf("Failed the call to: %s. 
Returned code: %d\n", #call, ret); \ + exit(1); \ + } \ + waitForCallback(); \ + } while (0) -struct ConfigNode { - char host[128]; - int port; - char key[128]; - int relay; - char peers[2048]; - int store; - char storeNode[2048]; - char storeRetentionPolicy[64]; - char storeDbUrl[256]; - int storeVacuum; - int storeDbMigration; - int storeMaxNumDbConnections; +struct ConfigNode +{ + char host[128]; + int port; + char key[128]; + int relay; + char peers[2048]; + int store; + char storeNode[2048]; + char storeRetentionPolicy[64]; + char storeDbUrl[256]; + int storeVacuum; + int storeDbMigration; + int storeMaxNumDbConnections; }; // libwaku Context -void* ctx; +void *ctx; // For the case of C language we don't need to store a particular userData -void* userData = NULL; +void *userData = NULL; // Arguments parsing static char doc[] = "\nC example that shows how to use the waku library."; static char args_doc[] = ""; static struct argp_option options[] = { - { "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, - { "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, - { "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, - { "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, - { "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ + {"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, + {"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, + {"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, + {"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, + {"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ to. (default: \"\") e.g. 
\"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""}, - { 0 } -}; + {0}}; -static error_t parse_opt(int key, char *arg, struct argp_state *state) { +static error_t parse_opt(int key, char *arg, struct argp_state *state) +{ - struct ConfigNode *cfgNode = state->input; - switch (key) { - case 'h': - snprintf(cfgNode->host, 128, "%s", arg); - break; - case 'p': - cfgNode->port = atoi(arg); - break; - case 'k': - snprintf(cfgNode->key, 128, "%s", arg); - break; - case 'r': - cfgNode->relay = atoi(arg); - break; - case 'a': - snprintf(cfgNode->peers, 2048, "%s", arg); - break; - case ARGP_KEY_ARG: - if (state->arg_num >= 1) /* Too many arguments. */ - argp_usage(state); - break; - case ARGP_KEY_END: - break; - default: - return ARGP_ERR_UNKNOWN; - } + struct ConfigNode *cfgNode = state->input; + switch (key) + { + case 'h': + snprintf(cfgNode->host, 128, "%s", arg); + break; + case 'p': + cfgNode->port = atoi(arg); + break; + case 'k': + snprintf(cfgNode->key, 128, "%s", arg); + break; + case 'r': + cfgNode->relay = atoi(arg); + break; + case 'a': + snprintf(cfgNode->peers, 2048, "%s", arg); + break; + case ARGP_KEY_ARG: + if (state->arg_num >= 1) /* Too many arguments. 
*/ + argp_usage(state); + break; + case ARGP_KEY_END: + break; + default: + return ARGP_ERR_UNKNOWN; + } - return 0; + return 0; } -void signal_cond() { - pthread_mutex_lock(&mutex); - callback_executed = 1; - pthread_cond_signal(&cond); - pthread_mutex_unlock(&mutex); +void signal_cond() +{ + pthread_mutex_lock(&mutex); + callback_executed = 1; + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); } -static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; +static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0}; -void event_handler(int callerRet, const char* msg, size_t len, void* userData) { - if (callerRet == RET_ERR) { - printf("Error: %s\n", msg); - exit(1); - } - else if (callerRet == RET_OK) { - printf("Receiving event: %s\n", msg); - } +void event_handler(int callerRet, const char *msg, size_t len, void *userData) +{ + if (callerRet == RET_ERR) + { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) + { + printf("Receiving event: %s\n", msg); + } - signal_cond(); + signal_cond(); } -void on_event_received(int callerRet, const char* msg, size_t len, void* userData) { - if (callerRet == RET_ERR) { - printf("Error: %s\n", msg); - exit(1); - } - else if (callerRet == RET_OK) { - printf("Receiving event: %s\n", msg); - } +void on_event_received(int callerRet, const char *msg, size_t len, void *userData) +{ + if (callerRet == RET_ERR) + { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) + { + printf("Receiving event: %s\n", msg); + } } -char* contentTopic = NULL; -void handle_content_topic(int callerRet, const char* msg, size_t len, void* userData) { - if (contentTopic != NULL) { - free(contentTopic); - } +char *contentTopic = NULL; +void handle_content_topic(int callerRet, const char *msg, size_t len, void *userData) +{ + if (contentTopic != NULL) + { + free(contentTopic); + } - contentTopic = malloc(len * sizeof(char) + 1); - strcpy(contentTopic, msg); - signal_cond(); 
+ contentTopic = malloc(len * sizeof(char) + 1); + strcpy(contentTopic, msg); + signal_cond(); } -char* publishResponse = NULL; -void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userData) { - printf("Publish Ok: %s %lu\n", msg, len); +char *publishResponse = NULL; +void handle_publish_ok(int callerRet, const char *msg, size_t len, void *userData) +{ + printf("Publish Ok: %s %lu\n", msg, len); - if (publishResponse != NULL) { - free(publishResponse); - } + if (publishResponse != NULL) + { + free(publishResponse); + } - publishResponse = malloc(len * sizeof(char) + 1); - strcpy(publishResponse, msg); + publishResponse = malloc(len * sizeof(char) + 1); + strcpy(publishResponse, msg); } #define MAX_MSG_SIZE 65535 -void publish_message(const char* msg) { - char jsonWakuMsg[MAX_MSG_SIZE]; - char *msgPayload = b64_encode(msg, strlen(msg)); +void publish_message(const char *msg) +{ + char jsonWakuMsg[MAX_MSG_SIZE]; + char *msgPayload = b64_encode(msg, strlen(msg)); - WAKU_CALL( waku_content_topic(ctx, - "appName", - 1, - "contentTopicName", - "encoding", - handle_content_topic, - userData) ); - snprintf(jsonWakuMsg, - MAX_MSG_SIZE, - "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", - msgPayload, contentTopic); + WAKU_CALL(waku_content_topic(ctx, + handle_content_topic, + userData, + "appName", + 1, + "contentTopicName", + "encoding")); + snprintf(jsonWakuMsg, + MAX_MSG_SIZE, + "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", + msgPayload, contentTopic); - free(msgPayload); + free(msgPayload); - WAKU_CALL( waku_relay_publish(ctx, - "/waku/2/rs/16/32", - jsonWakuMsg, - 10000 /*timeout ms*/, - event_handler, - userData) ); + WAKU_CALL(waku_relay_publish(ctx, + event_handler, + userData, + "/waku/2/rs/16/32", + jsonWakuMsg, + 10000 /*timeout ms*/)); } -void show_help_and_exit() { - printf("Wrong parameters\n"); - exit(1); +void show_help_and_exit() +{ + printf("Wrong parameters\n"); + exit(1); } -void print_default_pubsub_topic(int callerRet, const char* 
msg, size_t len, void* userData) { - printf("Default pubsub topic: %s\n", msg); - signal_cond(); +void print_default_pubsub_topic(int callerRet, const char *msg, size_t len, void *userData) +{ + printf("Default pubsub topic: %s\n", msg); + signal_cond(); } -void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) { - printf("Git Version: %s\n", msg); - signal_cond(); +void print_waku_version(int callerRet, const char *msg, size_t len, void *userData) +{ + printf("Git Version: %s\n", msg); + signal_cond(); } // Beginning of UI program logic -enum PROGRAM_STATE { - MAIN_MENU, - SUBSCRIBE_TOPIC_MENU, - CONNECT_TO_OTHER_NODE_MENU, - PUBLISH_MESSAGE_MENU +enum PROGRAM_STATE +{ + MAIN_MENU, + SUBSCRIBE_TOPIC_MENU, + CONNECT_TO_OTHER_NODE_MENU, + PUBLISH_MESSAGE_MENU }; enum PROGRAM_STATE current_state = MAIN_MENU; -void show_main_menu() { - printf("\nPlease, select an option:\n"); - printf("\t1.) Subscribe to topic\n"); - printf("\t2.) Connect to other node\n"); - printf("\t3.) Publish a message\n"); +void show_main_menu() +{ + printf("\nPlease, select an option:\n"); + printf("\t1.) Subscribe to topic\n"); + printf("\t2.) Connect to other node\n"); + printf("\t3.) 
Publish a message\n"); } -void handle_user_input() { - char cmd[1024]; - memset(cmd, 0, 1024); - int numRead = read(0, cmd, 1024); - if (numRead <= 0) { - return; - } +void handle_user_input() +{ + char cmd[1024]; + memset(cmd, 0, 1024); + int numRead = read(0, cmd, 1024); + if (numRead <= 0) + { + return; + } - switch (atoi(cmd)) - { - case SUBSCRIBE_TOPIC_MENU: - { - printf("Indicate the Pubsubtopic to subscribe:\n"); - char pubsubTopic[128]; - scanf("%127s", pubsubTopic); + switch (atoi(cmd)) + { + case SUBSCRIBE_TOPIC_MENU: + { + printf("Indicate the Pubsubtopic to subscribe:\n"); + char pubsubTopic[128]; + scanf("%127s", pubsubTopic); - WAKU_CALL( waku_relay_subscribe(ctx, - pubsubTopic, - event_handler, - userData) ); - printf("The subscription went well\n"); + WAKU_CALL(waku_relay_subscribe(ctx, + event_handler, + userData, + pubsubTopic)); + printf("The subscription went well\n"); - show_main_menu(); - } + show_main_menu(); + } + break; + + case CONNECT_TO_OTHER_NODE_MENU: + // printf("Connecting to a node. Please indicate the peer Multiaddress:\n"); + // printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); + // char peerAddr[512]; + // scanf("%511s", peerAddr); + // WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData)); + show_main_menu(); break; - case CONNECT_TO_OTHER_NODE_MENU: - printf("Connecting to a node. 
Please indicate the peer Multiaddress:\n"); - printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); - char peerAddr[512]; - scanf("%511s", peerAddr); - WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData)); - show_main_menu(); + case PUBLISH_MESSAGE_MENU: + { + printf("Type the message to publish:\n"); + char msg[1024]; + scanf("%1023s", msg); + + publish_message(msg); + + show_main_menu(); + } + break; + + case MAIN_MENU: break; - - case PUBLISH_MESSAGE_MENU: - { - printf("Type the message to publish:\n"); - char msg[1024]; - scanf("%1023s", msg); - - publish_message(msg); - - show_main_menu(); - } - break; - - case MAIN_MENU: - break; - } + } } // End of UI program logic -int main(int argc, char** argv) { - struct ConfigNode cfgNode; - // default values - snprintf(cfgNode.host, 128, "0.0.0.0"); - cfgNode.port = 60000; - cfgNode.relay = 1; +int main(int argc, char **argv) +{ + struct ConfigNode cfgNode; + // default values + snprintf(cfgNode.host, 128, "0.0.0.0"); + cfgNode.port = 60000; + cfgNode.relay = 1; - cfgNode.store = 0; - snprintf(cfgNode.storeNode, 2048, ""); - snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000"); - snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres"); - cfgNode.storeVacuum = 0; - cfgNode.storeDbMigration = 0; - cfgNode.storeMaxNumDbConnections = 30; + cfgNode.store = 0; + snprintf(cfgNode.storeNode, 2048, ""); + snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000"); + snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres"); + cfgNode.storeVacuum = 0; + cfgNode.storeDbMigration = 0; + cfgNode.storeMaxNumDbConnections = 30; - if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) - == ARGP_ERR_UNKNOWN) { - show_help_and_exit(); - } + if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN) + { + show_help_and_exit(); + } - char jsonConfig[5000]; - snprintf(jsonConfig, 
5000, "{ \ + char jsonConfig[5000]; + snprintf(jsonConfig, 5000, "{ \ \"clusterId\": 16, \ \"shards\": [ 1, 32, 64, 128, 256 ], \ \"numShardsInNetwork\": 257, \ @@ -313,54 +339,56 @@ int main(int argc, char** argv) { \"discv5UdpPort\": 9999, \ \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ - }", cfgNode.host, - cfgNode.port, - cfgNode.relay ? "true":"false", - cfgNode.store ? "true":"false", - cfgNode.storeDbUrl, - cfgNode.storeRetentionPolicy, - cfgNode.storeMaxNumDbConnections); + }", + cfgNode.host, + cfgNode.port, + cfgNode.relay ? "true" : "false", + cfgNode.store ? "true" : "false", + cfgNode.storeDbUrl, + cfgNode.storeRetentionPolicy, + cfgNode.storeMaxNumDbConnections); - ctx = waku_new(jsonConfig, event_handler, userData); - waitForCallback(); + ctx = waku_new(jsonConfig, event_handler, userData); + waitForCallback(); - WAKU_CALL( waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData) ); - WAKU_CALL( waku_version(ctx, print_waku_version, userData) ); + WAKU_CALL(waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData)); + WAKU_CALL(waku_version(ctx, print_waku_version, userData)); - printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); - printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO"); + printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); + printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? 
"YES" : "NO"); - waku_set_event_callback(ctx, on_event_received, userData); + set_event_callback(ctx, on_event_received, userData); - waku_start(ctx, event_handler, userData); - waitForCallback(); + waku_start(ctx, event_handler, userData); + waitForCallback(); - WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) ); + WAKU_CALL(waku_listen_addresses(ctx, event_handler, userData)); - WAKU_CALL( waku_relay_subscribe(ctx, - "/waku/2/rs/0/0", - event_handler, - userData) ); + WAKU_CALL(waku_relay_subscribe(ctx, + event_handler, + userData, + "/waku/2/rs/16/32")); - WAKU_CALL( waku_discv5_update_bootnodes(ctx, - "[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]", - event_handler, - userData) ); + WAKU_CALL(waku_discv5_update_bootnodes(ctx, + event_handler, + userData, + 
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]")); - WAKU_CALL( waku_get_peerids_from_peerstore(ctx, - event_handler, - userData) ); + WAKU_CALL(waku_get_peerids_from_peerstore(ctx, + event_handler, + userData)); - show_main_menu(); - while(1) { - handle_user_input(); + show_main_menu(); + while (1) + { + handle_user_input(); - // Uncomment the following if need to test the metrics retrieval - // WAKU_CALL( waku_get_metrics(ctx, - // event_handler, - // userData) ); - } + // Uncomment the following if need to test the metrics retrieval + // WAKU_CALL( waku_get_metrics(ctx, + // event_handler, + // userData) ); + } - pthread_mutex_destroy(&mutex); - pthread_cond_destroy(&cond); + pthread_mutex_destroy(&mutex); + pthread_cond_destroy(&cond); } diff --git a/examples/cpp/waku.cpp b/examples/cpp/waku.cpp index c47877d02..2824f8e53 100644 --- a/examples/cpp/waku.cpp +++ b/examples/cpp/waku.cpp @@ -21,37 +21,43 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; int callback_executed = 0; -void waitForCallback() { +void waitForCallback() +{ pthread_mutex_lock(&mutex); - while (!callback_executed) { + while (!callback_executed) + { pthread_cond_wait(&cond, &mutex); } 
callback_executed = 0; pthread_mutex_unlock(&mutex); } -void signal_cond() { +void signal_cond() +{ pthread_mutex_lock(&mutex); callback_executed = 1; pthread_cond_signal(&cond); pthread_mutex_unlock(&mutex); } -#define WAKU_CALL(call) \ -do { \ - int ret = call; \ - if (ret != 0) { \ - std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \ - } \ - waitForCallback(); \ -} while (0) +#define WAKU_CALL(call) \ + do \ + { \ + int ret = call; \ + if (ret != 0) \ + { \ + std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \ + } \ + waitForCallback(); \ + } while (0) -struct ConfigNode { - char host[128]; - int port; - char key[128]; - int relay; - char peers[2048]; +struct ConfigNode +{ + char host[128]; + int port; + char key[128]; + int relay; + char peers[2048]; }; // Arguments parsing @@ -59,70 +65,76 @@ static char doc[] = "\nC example that shows how to use the waku library."; static char args_doc[] = ""; static struct argp_option options[] = { - { "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, - { "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, - { "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, - { "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, - { "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ + {"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, + {"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, + {"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, + {"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, + {"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ to. (default: \"\") e.g. 
\"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""}, - { 0 } -}; + {0}}; -static error_t parse_opt(int key, char *arg, struct argp_state *state) { +static error_t parse_opt(int key, char *arg, struct argp_state *state) +{ - struct ConfigNode *cfgNode = (ConfigNode *) state->input; - switch (key) { - case 'h': - snprintf(cfgNode->host, 128, "%s", arg); - break; - case 'p': - cfgNode->port = atoi(arg); - break; - case 'k': - snprintf(cfgNode->key, 128, "%s", arg); - break; - case 'r': - cfgNode->relay = atoi(arg); - break; - case 'a': - snprintf(cfgNode->peers, 2048, "%s", arg); - break; - case ARGP_KEY_ARG: - if (state->arg_num >= 1) /* Too many arguments. */ + struct ConfigNode *cfgNode = (ConfigNode *)state->input; + switch (key) + { + case 'h': + snprintf(cfgNode->host, 128, "%s", arg); + break; + case 'p': + cfgNode->port = atoi(arg); + break; + case 'k': + snprintf(cfgNode->key, 128, "%s", arg); + break; + case 'r': + cfgNode->relay = atoi(arg); + break; + case 'a': + snprintf(cfgNode->peers, 2048, "%s", arg); + break; + case ARGP_KEY_ARG: + if (state->arg_num >= 1) /* Too many arguments. 
*/ argp_usage(state); - break; - case ARGP_KEY_END: - break; - default: - return ARGP_ERR_UNKNOWN; - } + break; + case ARGP_KEY_END: + break; + default: + return ARGP_ERR_UNKNOWN; + } return 0; } -void event_handler(const char* msg, size_t len) { +void event_handler(const char *msg, size_t len) +{ printf("Receiving event: %s\n", msg); } -void handle_error(const char* msg, size_t len) { +void handle_error(const char *msg, size_t len) +{ printf("handle_error: %s\n", msg); exit(1); } template -auto cify(F&& f) { - static F fn = std::forward(f); - return [](int callerRet, const char* msg, size_t len, void* userData) { - signal_cond(); - return fn(msg, len); - }; +auto cify(F &&f) +{ + static F fn = std::forward(f); + return [](int callerRet, const char *msg, size_t len, void *userData) + { + signal_cond(); + return fn(msg, len); + }; } -static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; +static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0}; // Beginning of UI program logic -enum PROGRAM_STATE { +enum PROGRAM_STATE +{ MAIN_MENU, SUBSCRIBE_TOPIC_MENU, CONNECT_TO_OTHER_NODE_MENU, @@ -131,18 +143,21 @@ enum PROGRAM_STATE { enum PROGRAM_STATE current_state = MAIN_MENU; -void show_main_menu() { +void show_main_menu() +{ printf("\nPlease, select an option:\n"); printf("\t1.) Subscribe to topic\n"); printf("\t2.) Connect to other node\n"); printf("\t3.) 
Publish a message\n"); } -void handle_user_input(void* ctx) { +void handle_user_input(void *ctx) +{ char cmd[1024]; memset(cmd, 0, 1024); int numRead = read(0, cmd, 1024); - if (numRead <= 0) { + if (numRead <= 0) + { return; } @@ -154,12 +169,11 @@ void handle_user_input(void* ctx) { char pubsubTopic[128]; scanf("%127s", pubsubTopic); - WAKU_CALL( waku_relay_subscribe(ctx, - pubsubTopic, - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr) ); + WAKU_CALL(waku_relay_subscribe(ctx, + cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr, + pubsubTopic)); printf("The subscription went well\n"); show_main_menu(); @@ -171,15 +185,14 @@ void handle_user_input(void* ctx) { printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); char peerAddr[512]; scanf("%511s", peerAddr); - WAKU_CALL( waku_connect(ctx, - peerAddr, - 10000 /* timeoutMs */, - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr)); + WAKU_CALL(waku_connect(ctx, + cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr, + peerAddr, + 10000 /* timeoutMs */)); show_main_menu(); - break; + break; case PUBLISH_MESSAGE_MENU: { @@ -193,28 +206,26 @@ void handle_user_input(void* ctx) { std::string contentTopic; waku_content_topic(ctx, + cify([&contentTopic](const char *msg, size_t len) + { contentTopic = msg; }), + nullptr, "appName", - 1, - "contentTopicName", - "encoding", - cify([&contentTopic](const char* msg, size_t len) { - contentTopic = msg; - }), - nullptr); + 1, + "contentTopicName", + "encoding"); snprintf(jsonWakuMsg, 2048, "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", msgPayload.data(), contentTopic.c_str()); - WAKU_CALL( waku_relay_publish(ctx, - "/waku/2/rs/16/32", - jsonWakuMsg, - 10000 /*timeout ms*/, - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr) ); + WAKU_CALL(waku_relay_publish(ctx, + 
cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr, + "/waku/2/rs/16/32", + jsonWakuMsg, + 10000 /*timeout ms*/)); show_main_menu(); } @@ -227,12 +238,14 @@ void handle_user_input(void* ctx) { // End of UI program logic -void show_help_and_exit() { +void show_help_and_exit() +{ printf("Wrong parameters\n"); exit(1); } -int main(int argc, char** argv) { +int main(int argc, char **argv) +{ struct ConfigNode cfgNode; // default values snprintf(cfgNode.host, 128, "0.0.0.0"); @@ -241,8 +254,8 @@ int main(int argc, char** argv) { cfgNode.port = 60000; cfgNode.relay = 1; - if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) - == ARGP_ERR_UNKNOWN) { + if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN) + { show_help_and_exit(); } @@ -260,72 +273,64 @@ int main(int argc, char** argv) { \"discv5UdpPort\": 9999, \ \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ - }", cfgNode.host, - cfgNode.port); + }", + cfgNode.host, + cfgNode.port); - void* ctx = + void *ctx = waku_new(jsonConfig, - cify([](const char* msg, size_t len) { - std::cout << "waku_new feedback: " << msg << std::endl; - } - ), - nullptr - ); + cify([](const char *msg, size_t len) + { std::cout << "waku_new feedback: " << msg << std::endl; }), + nullptr); waitForCallback(); // example on how to retrieve a value from the `libwaku` callback. 
std::string defaultPubsubTopic; WAKU_CALL( waku_default_pubsub_topic( - ctx, - cify([&defaultPubsubTopic](const char* msg, size_t len) { - defaultPubsubTopic = msg; - } - ), - nullptr)); + ctx, + cify([&defaultPubsubTopic](const char *msg, size_t len) + { defaultPubsubTopic = msg; }), + nullptr)); std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl; - WAKU_CALL(waku_version(ctx, - cify([&](const char* msg, size_t len) { - std::cout << "Git Version: " << msg << std::endl; - }), + WAKU_CALL(waku_version(ctx, + cify([&](const char *msg, size_t len) + { std::cout << "Git Version: " << msg << std::endl; }), nullptr)); printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); - printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO"); + printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO"); std::string pubsubTopic; - WAKU_CALL(waku_pubsub_topic(ctx, - "example", - cify([&](const char* msg, size_t len) { - pubsubTopic = msg; - }), - nullptr)); + WAKU_CALL(waku_pubsub_topic(ctx, + cify([&](const char *msg, size_t len) + { pubsubTopic = msg; }), + nullptr, + "example")); std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl; - waku_set_event_callback(ctx, - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr); + set_event_callback(ctx, + cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr); - WAKU_CALL( waku_start(ctx, - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr)); + WAKU_CALL(waku_start(ctx, + cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr)); - WAKU_CALL( waku_relay_subscribe(ctx, - defaultPubsubTopic.c_str(), - cify([&](const char* msg, size_t len) { - event_handler(msg, len); - }), - nullptr) ); + WAKU_CALL(waku_relay_subscribe(ctx, + cify([&](const char *msg, size_t len) + { event_handler(msg, len); }), + nullptr, + defaultPubsubTopic.c_str())); show_main_menu(); - while(1) { + 
while (1) + { handle_user_input(ctx); } } diff --git a/examples/golang/waku.go b/examples/golang/waku.go index 846362dfe..e205ecd09 100644 --- a/examples/golang/waku.go +++ b/examples/golang/waku.go @@ -71,32 +71,32 @@ package main static void* cGoWakuNew(const char* configJson, void* resp) { // We pass NULL because we are not interested in retrieving data from this callback - void* ret = waku_new(configJson, (WakuCallBack) callback, resp); + void* ret = waku_new(configJson, (FFICallBack) callback, resp); return ret; } static void cGoWakuStart(void* wakuCtx, void* resp) { - WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_start(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuStop(void* wakuCtx, void* resp) { - WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_stop(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuDestroy(void* wakuCtx, void* resp) { - WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_destroy(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) { - WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_start_discv5(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) { - WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_stop_discv5(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuVersion(void* wakuCtx, void* resp) { - WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL(waku_version(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuSetEventCallback(void* wakuCtx) { @@ -112,7 +112,7 @@ package main // This technique is needed because cgo only allows to export Go functions and not methods. 
- waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx); + set_event_callback(wakuCtx, (FFICallBack) globalEventCallback, wakuCtx); } static void cGoWakuContentTopic(void* wakuCtx, @@ -123,20 +123,21 @@ package main void* resp) { WAKU_CALL( waku_content_topic(wakuCtx, + (FFICallBack) callback, + resp, appName, appVersion, contentTopicName, - encoding, - (WakuCallBack) callback, - resp) ); + encoding + ) ); } static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) { - WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) ); + WAKU_CALL( waku_pubsub_topic(wakuCtx, (FFICallBack) callback, resp, topicName) ); } static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) { - WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp)); + WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (FFICallBack) callback, resp)); } static void cGoWakuRelayPublish(void* wakuCtx, @@ -146,34 +147,36 @@ package main void* resp) { WAKU_CALL (waku_relay_publish(wakuCtx, + (FFICallBack) callback, + resp, pubSubTopic, jsonWakuMessage, - timeoutMs, - (WakuCallBack) callback, - resp)); + timeoutMs + )); } static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { WAKU_CALL ( waku_relay_subscribe(wakuCtx, - pubSubTopic, - (WakuCallBack) callback, - resp) ); + (FFICallBack) callback, + resp, + pubSubTopic) ); } static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { WAKU_CALL ( waku_relay_unsubscribe(wakuCtx, - pubSubTopic, - (WakuCallBack) callback, - resp) ); + (FFICallBack) callback, + resp, + pubSubTopic) ); } static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) { WAKU_CALL( waku_connect(wakuCtx, + (FFICallBack) callback, + resp, peerMultiAddr, - timeoutMs, - (WakuCallBack) callback, - resp) ); + timeoutMs + ) ); } static void cGoWakuDialPeerById(void* wakuCtx, @@ -183,42 +186,44 @@ package main void* resp) { 
WAKU_CALL( waku_dial_peer_by_id(wakuCtx, + (FFICallBack) callback, + resp, peerId, protocol, - timeoutMs, - (WakuCallBack) callback, - resp) ); + timeoutMs + ) ); } static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) { WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx, - peerId, - (WakuCallBack) callback, - resp) ); + (FFICallBack) callback, + resp, + peerId + ) ); } static void cGoWakuListenAddresses(void* wakuCtx, void* resp) { - WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_listen_addresses(wakuCtx, (FFICallBack) callback, resp) ); } static void cGoWakuGetMyENR(void* ctx, void* resp) { - WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_get_my_enr(ctx, (FFICallBack) callback, resp) ); } static void cGoWakuGetMyPeerId(void* ctx, void* resp) { - WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_get_my_peerid(ctx, (FFICallBack) callback, resp) ); } static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) { - WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, (FFICallBack) callback, resp, pubSubTopic) ); } static void cGoWakuGetNumConnectedPeers(void* ctx, char* pubSubTopic, void* resp) { - WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_relay_get_num_connected_peers(ctx, (FFICallBack) callback, resp, pubSubTopic) ); } static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) { - WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) callback, resp) ); + WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (FFICallBack) callback, resp) ); } static void cGoWakuLightpushPublish(void* wakuCtx, @@ -227,10 +232,11 @@ package main void* resp) { WAKU_CALL (waku_lightpush_publish(wakuCtx, + (FFICallBack) callback, + resp, 
pubSubTopic, - jsonWakuMessage, - (WakuCallBack) callback, - resp)); + jsonWakuMessage + )); } static void cGoWakuStoreQuery(void* wakuCtx, @@ -240,11 +246,12 @@ package main void* resp) { WAKU_CALL (waku_store_query(wakuCtx, + (FFICallBack) callback, + resp, jsonQuery, peerAddr, - timeoutMs, - (WakuCallBack) callback, - resp)); + timeoutMs + )); } static void cGoWakuPeerExchangeQuery(void* wakuCtx, @@ -252,9 +259,10 @@ package main void* resp) { WAKU_CALL (waku_peer_exchange_request(wakuCtx, - numPeers, - (WakuCallBack) callback, - resp)); + (FFICallBack) callback, + resp, + numPeers + )); } static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx, @@ -262,9 +270,10 @@ package main void* resp) { WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx, - protocol, - (WakuCallBack) callback, - resp)); + (FFICallBack) callback, + resp, + protocol + )); } */ diff --git a/examples/ios/WakuExample.xcodeproj/project.pbxproj b/examples/ios/WakuExample.xcodeproj/project.pbxproj new file mode 100644 index 000000000..b7ce1dce7 --- /dev/null +++ b/examples/ios/WakuExample.xcodeproj/project.pbxproj @@ -0,0 +1,331 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 63; + objects = { + +/* Begin PBXBuildFile section */ + 45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */; }; + 6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7D8744E36DADC11F38A1CC99 /* ContentView.swift */; }; + C4EA202B782038F96336401F /* WakuNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = 638A565C495A63CFF7396FBC /* WakuNode.swift */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuExampleApp.swift; sourceTree = ""; }; + 31BE20DB2755A11000723420 /* libwaku.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = libwaku.h; sourceTree = ""; }; + 5C5AAC91E0166D28BFA986DB /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist; path = Info.plist; sourceTree = ""; }; + 638A565C495A63CFF7396FBC /* WakuNode.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuNode.swift; sourceTree = ""; }; + 7D8744E36DADC11F38A1CC99 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = ""; }; + A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "WakuExample-Bridging-Header.h"; sourceTree = ""; }; + CFBE844B6E18ACB81C65F83B /* WakuExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = WakuExample.app; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + 34547A6259485BD047D6375C /* Products */ = { + isa = PBXGroup; + children = ( + CFBE844B6E18ACB81C65F83B /* WakuExample.app */, + ); + name = 
Products; + sourceTree = ""; + }; + 4F76CB85EC44E951B8E75522 /* WakuExample */ = { + isa = PBXGroup; + children = ( + 7D8744E36DADC11F38A1CC99 /* ContentView.swift */, + 5C5AAC91E0166D28BFA986DB /* Info.plist */, + 31BE20DB2755A11000723420 /* libwaku.h */, + A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */, + 0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */, + 638A565C495A63CFF7396FBC /* WakuNode.swift */, + ); + path = WakuExample; + sourceTree = ""; + }; + D40CD2446F177CAABB0A747A = { + isa = PBXGroup; + children = ( + 4F76CB85EC44E951B8E75522 /* WakuExample */, + 34547A6259485BD047D6375C /* Products */, + ); + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + F751EF8294AD21F713D47FDA /* WakuExample */ = { + isa = PBXNativeTarget; + buildConfigurationList = 757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */; + buildPhases = ( + D3AFD8C4DA68BF5C4F7D8E10 /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = WakuExample; + packageProductDependencies = ( + ); + productName = WakuExample; + productReference = CFBE844B6E18ACB81C65F83B /* WakuExample.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 4FF82F0F4AF8E1E34728F150 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = YES; + LastUpgradeCheck = 1500; + }; + buildConfigurationList = B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */; + compatibilityVersion = "Xcode 14.0"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + Base, + en, + ); + mainGroup = D40CD2446F177CAABB0A747A; + minimizedProjectReferenceProxies = 1; + projectDirPath = ""; + projectRoot = ""; + targets = ( + F751EF8294AD21F713D47FDA /* WakuExample */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXSourcesBuildPhase section */ + 
D3AFD8C4DA68BF5C4F7D8E10 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */, + 45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */, + C4EA202B782038F96336401F /* WakuNode.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 36939122077C66DD94082311 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CODE_SIGN_IDENTITY = "iPhone Developer"; + DEVELOPMENT_TEAM = 2Q52K2W84K; + HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample"; + INFOPLIST_FILE = WakuExample/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 18.6; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + "LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64"; + "LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64"; + MACOSX_DEPLOYMENT_TARGET = 15.6; + OTHER_LDFLAGS = ( + "-lc++", + "-force_load", + "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a", + "-lsqlite3", + "-lz", + ); + PRODUCT_BUNDLE_IDENTIFIER = org.waku.example; + SDKROOT = iphoneos; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES; + SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES; + SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; + 9BA833A09EEDB4B3FCCD8F8E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + 
CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 18.6; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + SWIFT_VERSION = 5.0; + }; + name = Release; + }; + A59ABFB792FED8974231E5AC /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = 
"gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "DEBUG=1", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 18.6; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = iphoneos; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 5.0; + }; + name = 
Debug; + }; + AF5ADDAA865B1F6BD4E70A79 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CODE_SIGN_IDENTITY = "iPhone Developer"; + DEVELOPMENT_TEAM = 2Q52K2W84K; + HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample"; + INFOPLIST_FILE = WakuExample/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 18.6; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + "LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64"; + "LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64"; + MACOSX_DEPLOYMENT_TARGET = 15.6; + OTHER_LDFLAGS = ( + "-lc++", + "-force_load", + "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a", + "-lsqlite3", + "-lz", + ); + PRODUCT_BUNDLE_IDENTIFIER = org.waku.example; + SDKROOT = iphoneos; + SUPPORTED_PLATFORMS = "iphoneos iphonesimulator"; + SUPPORTS_MACCATALYST = NO; + SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES; + SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES; + SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + AF5ADDAA865B1F6BD4E70A79 /* Debug */, + 36939122077C66DD94082311 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Debug; + }; + B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + A59ABFB792FED8974231E5AC /* Debug */, + 9BA833A09EEDB4B3FCCD8F8E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Debug; + }; +/* End XCConfigurationList section */ + }; + rootObject = 4FF82F0F4AF8E1E34728F150 /* 
Project object */; +} diff --git a/examples/ios/WakuExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/examples/ios/WakuExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 000000000..919434a62 --- /dev/null +++ b/examples/ios/WakuExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/examples/ios/WakuExample/ContentView.swift b/examples/ios/WakuExample/ContentView.swift new file mode 100644 index 000000000..14bb4ee1d --- /dev/null +++ b/examples/ios/WakuExample/ContentView.swift @@ -0,0 +1,229 @@ +// +// ContentView.swift +// WakuExample +// +// Minimal chat PoC using libwaku on iOS +// + +import SwiftUI + +struct ContentView: View { + @StateObject private var wakuNode = WakuNode() + @State private var messageText = "" + + var body: some View { + ZStack { + // Main content + VStack(spacing: 0) { + // Header with status + HStack { + Circle() + .fill(statusColor) + .frame(width: 10, height: 10) + VStack(alignment: .leading, spacing: 2) { + Text(wakuNode.status.rawValue) + .font(.caption) + if wakuNode.status == .running { + HStack(spacing: 4) { + Text(wakuNode.isConnected ? 
"Connected" : "Discovering...") + Text("•") + filterStatusView + } + .font(.caption2) + .foregroundColor(.secondary) + + // Subscription maintenance status + if wakuNode.subscriptionMaintenanceActive { + HStack(spacing: 4) { + Image(systemName: "arrow.triangle.2.circlepath") + .foregroundColor(.blue) + Text("Maintenance active") + if wakuNode.failedSubscribeAttempts > 0 { + Text("(\(wakuNode.failedSubscribeAttempts) retries)") + .foregroundColor(.orange) + } + } + .font(.caption2) + .foregroundColor(.secondary) + } + } + } + Spacer() + if wakuNode.status == .stopped { + Button("Start") { + wakuNode.start() + } + .buttonStyle(.borderedProminent) + .controlSize(.small) + } else if wakuNode.status == .running { + if !wakuNode.filterSubscribed { + Button("Resub") { + wakuNode.resubscribe() + } + .buttonStyle(.bordered) + .controlSize(.small) + } + Button("Stop") { + wakuNode.stop() + } + .buttonStyle(.bordered) + .controlSize(.small) + } + } + .padding() + .background(Color.gray.opacity(0.1)) + + // Messages list + ScrollViewReader { proxy in + ScrollView { + LazyVStack(alignment: .leading, spacing: 8) { + ForEach(wakuNode.receivedMessages.reversed()) { message in + MessageBubble(message: message) + .id(message.id) + } + } + .padding() + } + .onChange(of: wakuNode.receivedMessages.count) { _, newCount in + if let lastMessage = wakuNode.receivedMessages.first { + withAnimation { + proxy.scrollTo(lastMessage.id, anchor: .bottom) + } + } + } + } + + Divider() + + // Message input + HStack(spacing: 12) { + TextField("Message", text: $messageText) + .textFieldStyle(.roundedBorder) + .disabled(wakuNode.status != .running) + + Button(action: sendMessage) { + Image(systemName: "paperplane.fill") + .foregroundColor(.white) + .padding(10) + .background(canSend ? 
Color.blue : Color.gray) + .clipShape(Circle()) + } + .disabled(!canSend) + } + .padding() + .background(Color.gray.opacity(0.1)) + } + + // Toast overlay for errors + VStack { + ForEach(wakuNode.errorQueue) { error in + ToastView(error: error) { + wakuNode.dismissError(error) + } + .transition(.asymmetric( + insertion: .move(edge: .top).combined(with: .opacity), + removal: .opacity + )) + } + Spacer() + } + .padding(.top, 8) + .animation(.easeInOut(duration: 0.3), value: wakuNode.errorQueue) + } + } + + private var statusColor: Color { + switch wakuNode.status { + case .stopped: return .gray + case .starting: return .yellow + case .running: return .green + case .error: return .red + } + } + + @ViewBuilder + private var filterStatusView: some View { + if wakuNode.filterSubscribed { + Text("Filter OK") + .foregroundColor(.green) + } else if wakuNode.failedSubscribeAttempts > 0 { + Text("Filter retrying (\(wakuNode.failedSubscribeAttempts))") + .foregroundColor(.orange) + } else { + Text("Filter pending") + .foregroundColor(.orange) + } + } + + private var canSend: Bool { + wakuNode.status == .running && wakuNode.isConnected && !messageText.trimmingCharacters(in: .whitespaces).isEmpty + } + + private func sendMessage() { + let text = messageText.trimmingCharacters(in: .whitespaces) + guard !text.isEmpty else { return } + + wakuNode.publish(message: text) + messageText = "" + } +} + +// MARK: - Toast View + +struct ToastView: View { + let error: TimestampedError + let onDismiss: () -> Void + + var body: some View { + HStack(spacing: 12) { + Image(systemName: "exclamationmark.triangle.fill") + .foregroundColor(.white) + + Text(error.message) + .font(.subheadline) + .foregroundColor(.white) + .lineLimit(2) + + Spacer() + + Button(action: onDismiss) { + Image(systemName: "xmark.circle.fill") + .foregroundColor(.white.opacity(0.8)) + .font(.title3) + } + .buttonStyle(.plain) + } + .padding(.horizontal, 16) + .padding(.vertical, 12) + .background( + 
RoundedRectangle(cornerRadius: 12) + .fill(Color.red.opacity(0.9)) + .shadow(color: .black.opacity(0.2), radius: 8, x: 0, y: 4) + ) + .padding(.horizontal, 16) + .padding(.vertical, 4) + } +} + +// MARK: - Message Bubble + +struct MessageBubble: View { + let message: WakuMessage + + var body: some View { + VStack(alignment: .leading, spacing: 4) { + Text(message.payload) + .padding(10) + .background(Color.blue.opacity(0.1)) + .cornerRadius(12) + + Text(message.timestamp, style: .time) + .font(.caption2) + .foregroundColor(.secondary) + } + } +} + +#Preview { + ContentView() +} diff --git a/examples/ios/WakuExample/Info.plist b/examples/ios/WakuExample/Info.plist new file mode 100644 index 000000000..a9222555a --- /dev/null +++ b/examples/ios/WakuExample/Info.plist @@ -0,0 +1,36 @@ + + + + + CFBundleDevelopmentRegion + $(DEVELOPMENT_LANGUAGE) + CFBundleDisplayName + Waku Example + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + org.waku.example + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + WakuExample + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + NSAppTransportSecurity + + NSAllowsArbitraryLoads + + + UILaunchScreen + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + + + + diff --git a/examples/ios/WakuExample/WakuExample-Bridging-Header.h b/examples/ios/WakuExample/WakuExample-Bridging-Header.h new file mode 100644 index 000000000..50595450e --- /dev/null +++ b/examples/ios/WakuExample/WakuExample-Bridging-Header.h @@ -0,0 +1,15 @@ +// +// WakuExample-Bridging-Header.h +// WakuExample +// +// Bridging header to expose libwaku C functions to Swift +// + +#ifndef WakuExample_Bridging_Header_h +#define WakuExample_Bridging_Header_h + +#import "libwaku.h" + +#endif /* WakuExample_Bridging_Header_h */ + + diff --git a/examples/ios/WakuExample/WakuExampleApp.swift b/examples/ios/WakuExample/WakuExampleApp.swift new file mode 100644 index 000000000..fb99785aa --- /dev/null +++ 
b/examples/ios/WakuExample/WakuExampleApp.swift @@ -0,0 +1,19 @@ +// +// WakuExampleApp.swift +// WakuExample +// +// SwiftUI app entry point for Waku iOS example +// + +import SwiftUI + +@main +struct WakuExampleApp: App { + var body: some Scene { + WindowGroup { + ContentView() + } + } +} + + diff --git a/examples/ios/WakuExample/WakuNode.swift b/examples/ios/WakuExample/WakuNode.swift new file mode 100644 index 000000000..245529a2f --- /dev/null +++ b/examples/ios/WakuExample/WakuNode.swift @@ -0,0 +1,739 @@ +// +// WakuNode.swift +// WakuExample +// +// Swift wrapper around libwaku C API for edge mode (lightpush + filter) +// Uses Swift actors for thread safety and UI responsiveness +// + +import Foundation + +// MARK: - Data Types + +/// Message received from Waku network +struct WakuMessage: Identifiable, Equatable, Sendable { + let id: String // messageHash from Waku - unique identifier for deduplication + let payload: String + let contentTopic: String + let timestamp: Date +} + +/// Waku node status +enum WakuNodeStatus: String, Sendable { + case stopped = "Stopped" + case starting = "Starting..." + case running = "Running" + case error = "Error" +} + +/// Status updates from WakuActor to WakuNode +enum WakuStatusUpdate: Sendable { + case statusChanged(WakuNodeStatus) + case connectionChanged(isConnected: Bool) + case filterSubscriptionChanged(subscribed: Bool, failedAttempts: Int) + case maintenanceChanged(active: Bool) + case error(String) +} + +/// Error with timestamp for toast queue +struct TimestampedError: Identifiable, Equatable { + let id = UUID() + let message: String + let timestamp: Date + + static func == (lhs: TimestampedError, rhs: TimestampedError) -> Bool { + lhs.id == rhs.id + } +} + +// MARK: - Callback Context for C API + +private final class CallbackContext: @unchecked Sendable { + private let lock = NSLock() + private var _continuation: CheckedContinuation<(success: Bool, result: String?), Never>? 
+ private var _resumed = false + var success: Bool = false + var result: String? + + var continuation: CheckedContinuation<(success: Bool, result: String?), Never>? { + get { + lock.lock() + defer { lock.unlock() } + return _continuation + } + set { + lock.lock() + defer { lock.unlock() } + _continuation = newValue + } + } + + /// Thread-safe resume - ensures continuation is only resumed once + /// Returns true if this call actually resumed, false if already resumed + @discardableResult + func resumeOnce(returning value: (success: Bool, result: String?)) -> Bool { + lock.lock() + defer { lock.unlock() } + + guard !_resumed, let cont = _continuation else { + return false + } + + _resumed = true + _continuation = nil + cont.resume(returning: value) + return true + } +} + +// MARK: - WakuActor + +/// Actor that isolates all Waku operations from the main thread +/// All C API calls and mutable state are contained here +actor WakuActor { + + // MARK: - State + + private var ctx: UnsafeMutableRawPointer? + private var seenMessageHashes: Set = [] + private var isSubscribed: Bool = false + private var isSubscribing: Bool = false + private var hasPeers: Bool = false + private var maintenanceTask: Task? + private var eventProcessingTask: Task? + + // Stream continuations for communicating with UI + private var messageContinuation: AsyncStream.Continuation? + private var statusContinuation: AsyncStream.Continuation? + + // Event stream from C callbacks + private var eventContinuation: AsyncStream.Continuation? 
+ + // Configuration + let defaultPubsubTopic = "/waku/2/rs/1/0" + let defaultContentTopic = "/waku-ios-example/1/chat/proto" + private let staticPeer = "/dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ" + + // Subscription maintenance settings + private let maxFailedSubscribes = 3 + private let retryWaitSeconds: UInt64 = 2_000_000_000 // 2 seconds in nanoseconds + private let maintenanceIntervalSeconds: UInt64 = 30_000_000_000 // 30 seconds in nanoseconds + private let maxSeenHashes = 1000 + + // MARK: - Static callback storage (for C callbacks) + + // We need a way for C callbacks to reach the actor + // Using a simple static reference (safe because we only have one instance) + private static var sharedEventContinuation: AsyncStream.Continuation? + + private static let eventCallback: WakuCallBack = { ret, msg, len, userData in + guard ret == RET_OK, let msg = msg else { return } + let str = String(cString: msg) + WakuActor.sharedEventContinuation?.yield(str) + } + + private static let syncCallback: WakuCallBack = { ret, msg, len, userData in + guard let userData = userData else { return } + let context = Unmanaged.fromOpaque(userData).takeUnretainedValue() + let success = (ret == RET_OK) + var resultStr: String? = nil + if let msg = msg { + resultStr = String(cString: msg) + } + context.resumeOnce(returning: (success, resultStr)) + } + + // MARK: - Stream Setup + + func setMessageContinuation(_ continuation: AsyncStream.Continuation?) { + self.messageContinuation = continuation + } + + func setStatusContinuation(_ continuation: AsyncStream.Continuation?) 
{ + self.statusContinuation = continuation + } + + // MARK: - Public API + + var isRunning: Bool { + ctx != nil + } + + var hasConnectedPeers: Bool { + hasPeers + } + + func start() async { + guard ctx == nil else { + print("[WakuActor] Already started") + return + } + + statusContinuation?.yield(.statusChanged(.starting)) + + // Create event stream for C callbacks + let eventStream = AsyncStream { continuation in + self.eventContinuation = continuation + WakuActor.sharedEventContinuation = continuation + } + + // Start event processing task + eventProcessingTask = Task { [weak self] in + for await eventJson in eventStream { + await self?.handleEvent(eventJson) + } + } + + // Initialize the node + let success = await initializeNode() + + if success { + statusContinuation?.yield(.statusChanged(.running)) + + // Connect to peer + let connected = await connectToPeer() + if connected { + hasPeers = true + statusContinuation?.yield(.connectionChanged(isConnected: true)) + + // Start maintenance loop + startMaintenanceLoop() + } else { + statusContinuation?.yield(.error("Failed to connect to service peer")) + } + } + } + + func stop() async { + guard let context = ctx else { return } + + // Stop maintenance loop + maintenanceTask?.cancel() + maintenanceTask = nil + + // Stop event processing + eventProcessingTask?.cancel() + eventProcessingTask = nil + + // Close event stream + eventContinuation?.finish() + eventContinuation = nil + WakuActor.sharedEventContinuation = nil + + statusContinuation?.yield(.statusChanged(.stopped)) + statusContinuation?.yield(.connectionChanged(isConnected: false)) + statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0)) + statusContinuation?.yield(.maintenanceChanged(active: false)) + + // Reset state + let ctxToStop = context + ctx = nil + isSubscribed = false + isSubscribing = false + hasPeers = false + seenMessageHashes.removeAll() + + // Unsubscribe and stop in background (fire and forget) + 
Task.detached { + // Unsubscribe + _ = await self.callWakuSync { waku_filter_unsubscribe_all(ctxToStop, WakuActor.syncCallback, $0) } + print("[WakuActor] Unsubscribed from filter") + + // Stop + _ = await self.callWakuSync { waku_stop(ctxToStop, WakuActor.syncCallback, $0) } + print("[WakuActor] Node stopped") + + // Destroy + _ = await self.callWakuSync { waku_destroy(ctxToStop, WakuActor.syncCallback, $0) } + print("[WakuActor] Node destroyed") + } + } + + func publish(message: String, contentTopic: String? = nil) async { + guard let context = ctx else { + print("[WakuActor] Node not started") + return + } + + guard hasPeers else { + print("[WakuActor] No peers connected yet") + statusContinuation?.yield(.error("No peers connected yet. Please wait...")) + return + } + + let topic = contentTopic ?? defaultContentTopic + guard let payloadData = message.data(using: .utf8) else { return } + let payloadBase64 = payloadData.base64EncodedString() + let timestamp = Int64(Date().timeIntervalSince1970 * 1_000_000_000) + let jsonMessage = """ + {"payload":"\(payloadBase64)","contentTopic":"\(topic)","timestamp":\(timestamp)} + """ + + let result = await callWakuSync { userData in + waku_lightpush_publish( + context, + self.defaultPubsubTopic, + jsonMessage, + WakuActor.syncCallback, + userData + ) + } + + if result.success { + print("[WakuActor] Published message") + } else { + print("[WakuActor] Publish error: \(result.result ?? 
"unknown")") + statusContinuation?.yield(.error("Failed to send message")) + } + } + + func resubscribe() async { + print("[WakuActor] Force resubscribe requested") + isSubscribed = false + isSubscribing = false + statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0)) + _ = await subscribe() + } + + // MARK: - Private Methods + + private func initializeNode() async -> Bool { + let config = """ + { + "tcpPort": 60000, + "clusterId": 1, + "shards": [0], + "relay": false, + "lightpush": true, + "filter": true, + "logLevel": "DEBUG", + "discv5Discovery": true, + "discv5BootstrapNodes": [ + "enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw", + "enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw" + ], + "discv5UdpPort": 9999, + "dnsDiscovery": true, + "dnsDiscoveryUrl": "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im", + "dnsDiscoveryNameServers": ["8.8.8.8", "1.0.0.1"] + } + """ + + // Create node - waku_new is special, it returns the context directly + let createResult = await withCheckedContinuation { (continuation: CheckedContinuation<(ctx: UnsafeMutableRawPointer?, success: Bool, result: String?), Never>) in + let callbackCtx = CallbackContext() + let userDataPtr = Unmanaged.passRetained(callbackCtx).toOpaque() + + // Set up a 
simple callback for waku_new + let newCtx = waku_new(config, { ret, msg, len, userData in + guard let userData = userData else { return } + let context = Unmanaged.fromOpaque(userData).takeUnretainedValue() + context.success = (ret == RET_OK) + if let msg = msg { + context.result = String(cString: msg) + } + }, userDataPtr) + + // Small delay to ensure callback completes + DispatchQueue.global().asyncAfter(deadline: .now() + 0.1) { + Unmanaged.fromOpaque(userDataPtr).release() + continuation.resume(returning: (newCtx, callbackCtx.success, callbackCtx.result)) + } + } + + guard createResult.ctx != nil else { + statusContinuation?.yield(.statusChanged(.error)) + statusContinuation?.yield(.error("Failed to create node: \(createResult.result ?? "unknown")")) + return false + } + + ctx = createResult.ctx + + // Set event callback + waku_set_event_callback(ctx, WakuActor.eventCallback, nil) + + // Start node + let startResult = await callWakuSync { userData in + waku_start(self.ctx, WakuActor.syncCallback, userData) + } + + guard startResult.success else { + statusContinuation?.yield(.statusChanged(.error)) + statusContinuation?.yield(.error("Failed to start node: \(startResult.result ?? "unknown")")) + ctx = nil + return false + } + + print("[WakuActor] Node started") + return true + } + + private func connectToPeer() async -> Bool { + guard let context = ctx else { return false } + + print("[WakuActor] Connecting to static peer...") + + let result = await callWakuSync { userData in + waku_connect(context, self.staticPeer, 10000, WakuActor.syncCallback, userData) + } + + if result.success { + print("[WakuActor] Connected to peer successfully") + return true + } else { + print("[WakuActor] Failed to connect: \(result.result ?? "unknown")") + return false + } + } + + private func subscribe(contentTopic: String? 
= nil) async -> Bool { + guard let context = ctx else { return false } + guard !isSubscribed && !isSubscribing else { return isSubscribed } + + isSubscribing = true + let topic = contentTopic ?? defaultContentTopic + + let result = await callWakuSync { userData in + waku_filter_subscribe( + context, + self.defaultPubsubTopic, + topic, + WakuActor.syncCallback, + userData + ) + } + + isSubscribing = false + + if result.success { + print("[WakuActor] Subscribe request successful to \(topic)") + isSubscribed = true + statusContinuation?.yield(.filterSubscriptionChanged(subscribed: true, failedAttempts: 0)) + return true + } else { + print("[WakuActor] Subscribe error: \(result.result ?? "unknown")") + isSubscribed = false + return false + } + } + + private func pingFilterPeer() async -> Bool { + guard let context = ctx else { return false } + + let result = await callWakuSync { userData in + waku_ping_peer( + context, + self.staticPeer, + 10000, + WakuActor.syncCallback, + userData + ) + } + + return result.success + } + + // MARK: - Subscription Maintenance + + private func startMaintenanceLoop() { + guard maintenanceTask == nil else { + print("[WakuActor] Maintenance loop already running") + return + } + + statusContinuation?.yield(.maintenanceChanged(active: true)) + print("[WakuActor] Starting subscription maintenance loop") + + maintenanceTask = Task { [weak self] in + guard let self = self else { return } + + var failedSubscribes = 0 + var isFirstPingOnConnection = true + + while !Task.isCancelled { + guard await self.isRunning else { break } + + print("[WakuActor] Maintaining subscription...") + + let pingSuccess = await self.pingFilterPeer() + let currentlySubscribed = await self.isSubscribed + + if pingSuccess && currentlySubscribed { + print("[WakuActor] Subscription is live, waiting 30s") + try? 
await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds) + continue + } + + if !isFirstPingOnConnection && !pingSuccess { + print("[WakuActor] Ping failed - subscription may be lost") + await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes)) + } + isFirstPingOnConnection = false + + print("[WakuActor] No active subscription found. Sending subscribe request...") + + await self.resetSubscriptionState() + let subscribeSuccess = await self.subscribe() + + if subscribeSuccess { + print("[WakuActor] Subscribe request successful") + failedSubscribes = 0 + try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds) + continue + } + + failedSubscribes += 1 + await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes)) + print("[WakuActor] Subscribe request failed. Attempt \(failedSubscribes)/\(self.maxFailedSubscribes)") + + if failedSubscribes < self.maxFailedSubscribes { + print("[WakuActor] Retrying in 2s...") + try? await Task.sleep(nanoseconds: self.retryWaitSeconds) + } else { + print("[WakuActor] Max subscribe failures reached") + await self.statusContinuation?.yield(.error("Filter subscription failed after \(self.maxFailedSubscribes) attempts")) + failedSubscribes = 0 + try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds) + } + } + + print("[WakuActor] Subscription maintenance loop stopped") + await self.statusContinuation?.yield(.maintenanceChanged(active: false)) + } + } + + private func resetSubscriptionState() { + isSubscribed = false + isSubscribing = false + } + + // MARK: - Event Handling + + private func handleEvent(_ eventJson: String) { + guard let data = eventJson.data(using: .utf8), + let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any], + let eventType = json["eventType"] as? 
String else { + return + } + + if eventType == "connection_change" { + handleConnectionChange(json) + } else if eventType == "message" { + handleMessage(json) + } + } + + private func handleConnectionChange(_ json: [String: Any]) { + guard let peerEvent = json["peerEvent"] as? String else { return } + + if peerEvent == "Joined" || peerEvent == "Identified" { + hasPeers = true + statusContinuation?.yield(.connectionChanged(isConnected: true)) + } else if peerEvent == "Left" { + statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0)) + } + } + + private func handleMessage(_ json: [String: Any]) { + guard let messageHash = json["messageHash"] as? String, + let wakuMessage = json["wakuMessage"] as? [String: Any], + let payloadBase64 = wakuMessage["payload"] as? String, + let contentTopic = wakuMessage["contentTopic"] as? String, + let payloadData = Data(base64Encoded: payloadBase64), + let payloadString = String(data: payloadData, encoding: .utf8) else { + return + } + + // Deduplicate + guard !seenMessageHashes.contains(messageHash) else { + return + } + + seenMessageHashes.insert(messageHash) + + // Limit memory usage + if seenMessageHashes.count > maxSeenHashes { + seenMessageHashes.removeAll() + } + + let message = WakuMessage( + id: messageHash, + payload: payloadString, + contentTopic: contentTopic, + timestamp: Date() + ) + + messageContinuation?.yield(message) + } + + // MARK: - Helper for synchronous C calls + + private func callWakuSync(_ work: @escaping (UnsafeMutableRawPointer) -> Void) async -> (success: Bool, result: String?) 
{ + await withCheckedContinuation { continuation in + let context = CallbackContext() + context.continuation = continuation + let userDataPtr = Unmanaged.passRetained(context).toOpaque() + + work(userDataPtr) + + // Set a timeout to avoid hanging forever + DispatchQueue.global().asyncAfter(deadline: .now() + 15) { + // Try to resume with timeout - will be ignored if callback already resumed + let didTimeout = context.resumeOnce(returning: (false, "Timeout")) + if didTimeout { + print("[WakuActor] Call timed out") + } + Unmanaged.fromOpaque(userDataPtr).release() + } + } + } +} + +// MARK: - WakuNode (MainActor UI Wrapper) + +/// Main-thread UI wrapper that consumes updates from WakuActor via AsyncStreams +@MainActor +class WakuNode: ObservableObject { + + // MARK: - Published Properties (UI State) + + @Published var status: WakuNodeStatus = .stopped + @Published var receivedMessages: [WakuMessage] = [] + @Published var errorQueue: [TimestampedError] = [] + @Published var isConnected: Bool = false + @Published var filterSubscribed: Bool = false + @Published var subscriptionMaintenanceActive: Bool = false + @Published var failedSubscribeAttempts: Int = 0 + + // Topics (read-only access to actor's config) + var defaultPubsubTopic: String { "/waku/2/rs/1/0" } + var defaultContentTopic: String { "/waku-ios-example/1/chat/proto" } + + // MARK: - Private Properties + + private let actor = WakuActor() + private var messageTask: Task? + private var statusTask: Task? 
+ + // MARK: - Initialization + + init() {} + + deinit { + messageTask?.cancel() + statusTask?.cancel() + } + + // MARK: - Public API + + func start() { + guard status == .stopped || status == .error else { + print("[WakuNode] Already started or starting") + return + } + + // Create message stream + let messageStream = AsyncStream { continuation in + Task { + await self.actor.setMessageContinuation(continuation) + } + } + + // Create status stream + let statusStream = AsyncStream { continuation in + Task { + await self.actor.setStatusContinuation(continuation) + } + } + + // Start consuming messages + messageTask = Task { @MainActor in + for await message in messageStream { + self.receivedMessages.insert(message, at: 0) + if self.receivedMessages.count > 100 { + self.receivedMessages.removeLast() + } + } + } + + // Start consuming status updates + statusTask = Task { @MainActor in + for await update in statusStream { + self.handleStatusUpdate(update) + } + } + + // Start the actor + Task { + await actor.start() + } + } + + func stop() { + messageTask?.cancel() + messageTask = nil + statusTask?.cancel() + statusTask = nil + + Task { + await actor.stop() + } + + // Immediate UI update + status = .stopped + isConnected = false + filterSubscribed = false + subscriptionMaintenanceActive = false + failedSubscribeAttempts = 0 + } + + func publish(message: String, contentTopic: String? 
= nil) { + Task { + await actor.publish(message: message, contentTopic: contentTopic) + } + } + + func resubscribe() { + Task { + await actor.resubscribe() + } + } + + func dismissError(_ error: TimestampedError) { + errorQueue.removeAll { $0.id == error.id } + } + + func dismissAllErrors() { + errorQueue.removeAll() + } + + // MARK: - Private Methods + + private func handleStatusUpdate(_ update: WakuStatusUpdate) { + switch update { + case .statusChanged(let newStatus): + status = newStatus + + case .connectionChanged(let connected): + isConnected = connected + + case .filterSubscriptionChanged(let subscribed, let attempts): + filterSubscribed = subscribed + failedSubscribeAttempts = attempts + + case .maintenanceChanged(let active): + subscriptionMaintenanceActive = active + + case .error(let message): + let error = TimestampedError(message: message, timestamp: Date()) + errorQueue.append(error) + + // Schedule auto-dismiss after 10 seconds + let errorId = error.id + Task { @MainActor in + try? await Task.sleep(nanoseconds: 10_000_000_000) + self.errorQueue.removeAll { $0.id == errorId } + } + } + } +} diff --git a/examples/ios/WakuExample/libwaku.h b/examples/ios/WakuExample/libwaku.h new file mode 100644 index 000000000..b5d6c9bab --- /dev/null +++ b/examples/ios/WakuExample/libwaku.h @@ -0,0 +1,253 @@ + +// Generated manually and inspired by the one generated by the Nim Compiler. +// In order to see the header file generated by Nim just run `make libwaku` +// from the root repo folder and the header should be created in +// nimcache/release/libwaku/libwaku.h +#ifndef __libwaku__ +#define __libwaku__ + +#include +#include + +// The possible returned values for the functions that return int +#define RET_OK 0 +#define RET_ERR 1 +#define RET_MISSING_CALLBACK 2 + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData); + +// Creates a new instance of the waku node. 
+// Sets up the waku node from the given configuration. +// Returns a pointer to the Context needed by the rest of the API functions. +void* waku_new( + const char* configJson, + WakuCallBack callback, + void* userData); + +int waku_start(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_stop(void* ctx, + WakuCallBack callback, + void* userData); + +// Destroys an instance of a waku node created with waku_new +int waku_destroy(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_version(void* ctx, + WakuCallBack callback, + void* userData); + +// Sets a callback that will be invoked whenever an event occurs. +// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. +void waku_set_event_callback(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_content_topic(void* ctx, + const char* appName, + unsigned int appVersion, + const char* contentTopicName, + const char* encoding, + WakuCallBack callback, + void* userData); + +int waku_pubsub_topic(void* ctx, + const char* topicName, + WakuCallBack callback, + void* userData); + +int waku_default_pubsub_topic(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_relay_publish(void* ctx, + const char* pubSubTopic, + const char* jsonWakuMessage, + unsigned int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_lightpush_publish(void* ctx, + const char* pubSubTopic, + const char* jsonWakuMessage, + WakuCallBack callback, + void* userData); + +int waku_relay_subscribe(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_add_protected_shard(void* ctx, + int clusterId, + int shardId, + char* publicKey, + WakuCallBack callback, + void* userData); + +int waku_relay_unsubscribe(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_filter_subscribe(void* ctx, + const char* pubSubTopic, + const char* contentTopics, + WakuCallBack 
callback, + void* userData); + +int waku_filter_unsubscribe(void* ctx, + const char* pubSubTopic, + const char* contentTopics, + WakuCallBack callback, + void* userData); + +int waku_filter_unsubscribe_all(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_relay_get_num_connected_peers(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_connected_peers(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_num_peers_in_mesh(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_peers_in_mesh(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_store_query(void* ctx, + const char* jsonQuery, + const char* peerAddr, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_connect(void* ctx, + const char* peerMultiAddr, + unsigned int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_disconnect_peer_by_id(void* ctx, + const char* peerId, + WakuCallBack callback, + void* userData); + +int waku_disconnect_all_peers(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_dial_peer(void* ctx, + const char* peerMultiAddr, + const char* protocol, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_dial_peer_by_id(void* ctx, + const char* peerId, + const char* protocol, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_get_peerids_from_peerstore(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_connected_peers_info(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_peerids_by_protocol(void* ctx, + const char* protocol, + WakuCallBack callback, + void* userData); + +int waku_listen_addresses(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_connected_peers(void* ctx, + WakuCallBack callback, + void* userData); + +// Returns a 
list of multiaddress given a url to a DNS discoverable ENR tree +// Parameters +// char* entTreeUrl: URL containing a discoverable ENR tree +// char* nameDnsServer: The nameserver to resolve the ENR tree url. +// int timeoutMs: Timeout value in milliseconds to execute the call. +int waku_dns_discovery(void* ctx, + const char* entTreeUrl, + const char* nameDnsServer, + int timeoutMs, + WakuCallBack callback, + void* userData); + +// Updates the bootnode list used for discovering new peers via DiscoveryV5 +// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]` +int waku_discv5_update_bootnodes(void* ctx, + char* bootnodes, + WakuCallBack callback, + void* userData); + +int waku_start_discv5(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_stop_discv5(void* ctx, + WakuCallBack callback, + void* userData); + +// Retrieves the ENR information +int waku_get_my_enr(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_my_peerid(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_metrics(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_peer_exchange_request(void* ctx, + int numPeers, + WakuCallBack callback, + void* userData); + +int waku_ping_peer(void* ctx, + const char* peerAddr, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_is_online(void* ctx, + WakuCallBack callback, + void* userData); + +#ifdef __cplusplus +} +#endif + +#endif /* __libwaku__ */ diff --git a/examples/ios/project.yml b/examples/ios/project.yml new file mode 100644 index 000000000..9519e8b9e --- /dev/null +++ b/examples/ios/project.yml @@ -0,0 +1,47 @@ +name: WakuExample +options: + bundleIdPrefix: org.waku + deploymentTarget: + iOS: "14.0" + xcodeVersion: "15.0" + +settings: + SWIFT_VERSION: "5.0" + SUPPORTED_PLATFORMS: "iphoneos iphonesimulator" + SUPPORTS_MACCATALYST: "NO" + +targets: + WakuExample: + type: application + platform: iOS + supportedDestinations: [iOS] + 
sources: + - WakuExample + settings: + INFOPLIST_FILE: WakuExample/Info.plist + PRODUCT_BUNDLE_IDENTIFIER: org.waku.example + SWIFT_OBJC_BRIDGING_HEADER: WakuExample/WakuExample-Bridging-Header.h + HEADER_SEARCH_PATHS: + - "$(PROJECT_DIR)/WakuExample" + "LIBRARY_SEARCH_PATHS[sdk=iphoneos*]": + - "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64" + "LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]": + - "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64" + OTHER_LDFLAGS: + - "-lc++" + - "-lwaku" + IPHONEOS_DEPLOYMENT_TARGET: "14.0" + info: + path: WakuExample/Info.plist + properties: + CFBundleName: WakuExample + CFBundleDisplayName: Waku Example + CFBundleIdentifier: org.waku.example + CFBundleVersion: "1" + CFBundleShortVersionString: "1.0" + UILaunchScreen: {} + UISupportedInterfaceOrientations: + - UIInterfaceOrientationPortrait + NSAppTransportSecurity: + NSAllowsArbitraryLoads: true + diff --git a/examples/lightpush_mix/lightpush_publisher_mix.nim b/examples/lightpush_mix/lightpush_publisher_mix.nim index 1e26daa9b..104de8552 100644 --- a/examples/lightpush_mix/lightpush_publisher_mix.nim +++ b/examples/lightpush_mix/lightpush_publisher_mix.nim @@ -51,7 +51,6 @@ proc splitPeerIdAndAddr(maddr: string): (string, string) = proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} = # use notice to filter all waku messaging setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT) - notice "starting publisher", wakuPort = conf.port let @@ -114,17 +113,8 @@ proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} let dPeerId = PeerId.init(destPeerId).valueOr: error "Failed to initialize PeerId", error = error return - var conn: Connection - if not conf.mixDisabled: - conn = node.wakuMix.toConnection( - MixDestination.init(dPeerId, pxPeerInfo.addrs[0]), # destination lightpush peer - WakuLightPushCodec, # protocol codec which will be used over the mix connection - MixParameters(expectReply: Opt.some(true), numSurbs: 
Opt.some(byte(1))), - # mix parameters indicating we expect a single reply - ).valueOr: - error "failed to create mix connection", error = error - return + await node.mountRendezvousClient(clusterId) await node.start() node.peerManager.start() node.startPeerExchangeLoop() @@ -145,20 +135,26 @@ proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} var i = 0 while i < conf.numMsgs: + var conn: Connection if conf.mixDisabled: let connOpt = await node.peerManager.dialPeer(dPeerId, WakuLightPushCodec) if connOpt.isNone(): error "failed to dial peer with WakuLightPushCodec", target_peer_id = dPeerId return conn = connOpt.get() + else: + conn = node.wakuMix.toConnection( + MixDestination.exitNode(dPeerId), # destination lightpush peer + WakuLightPushCodec, # protocol codec which will be used over the mix connection + MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))), + # mix parameters indicating we expect a single reply + ).valueOr: + error "failed to create mix connection", error = error + return i = i + 1 let text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam venenatis magna ut tortor faucibus, in vestibulum nibh commodo. Aenean eget vestibulum augue. Nullam suscipit urna non nunc efficitur, at iaculis nisl consequat. Mauris quis ultrices elit. Suspendisse lobortis odio vitae laoreet facilisis. Cras ornare sem felis, at vulputate magna aliquam ac. Duis quis est ultricies, euismod nulla ac, interdum dui. Maecenas sit amet est vitae enim commodo gravida. Proin vitae elit nulla. Donec tempor dolor lectus, in faucibus velit elementum quis. Donec non mauris eu nibh faucibus cursus ut egestas dolor. Aliquam venenatis ligula id velit pulvinar malesuada. Vestibulum scelerisque, justo non porta gravida, nulla justo tempor purus, at sollicitudin erat erat vel libero. - Fusce nec eros eu metus tristique aliquet. Sed ut magna sagittis, vulputate diam sit amet, aliquam magna. 
Aenean sollicitudin velit lacus, eu ultrices magna semper at. Integer vitae felis ligula. In a eros nec risus condimentum tincidunt fermentum sit amet ex. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam vitae justo maximus, fringilla tellus nec, rutrum purus. Etiam efficitur nisi dapibus euismod vestibulum. Phasellus at felis elementum, tristique nulla ac, consectetur neque. - Maecenas hendrerit nibh eget velit rutrum, in ornare mauris molestie. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Praesent dignissim efficitur eros, sit amet rutrum justo mattis a. Fusce mollis neque at erat placerat bibendum. Ut fringilla fringilla orci, ut fringilla metus fermentum vel. In hac habitasse platea dictumst. Donec hendrerit porttitor odio. Suspendisse ornare sollicitudin mauris, sodales pulvinar velit finibus vel. Fusce id pulvinar neque. Suspendisse eget tincidunt sapien, ac accumsan turpis. - Curabitur cursus tincidunt leo at aliquet. Nunc dapibus quam id venenatis varius. Aenean eget augue vel velit dapibus aliquam. Nulla facilisi. Curabitur cursus, turpis vel congue volutpat, tellus eros cursus lacus, eu fringilla turpis orci non ipsum. In hac habitasse platea dictumst. Nulla aliquam nisl a nunc placerat, eget dignissim felis pulvinar. Fusce sed porta mauris. Donec sodales arcu in nisl sodales, quis posuere massa ultricies. Nam feugiat massa eget felis ultricies finibus. Nunc magna nulla, interdum a elit vel, egestas efficitur urna. Ut posuere tincidunt odio in maximus. Sed at dignissim est. - Morbi accumsan elementum ligula ut fringilla. Praesent in ex metus. Phasellus urna est, tempus sit amet elementum vitae, sollicitudin vel ipsum. Fusce hendrerit eleifend dignissim. Maecenas tempor dapibus dui quis laoreet. Cras tincidunt sed ipsum sed pellentesque. 
Proin ut tellus nec ipsum varius interdum. Curabitur id velit ligula. Etiam sapien nulla, cursus sodales orci eu, porta lobortis nunc. Nunc at dapibus velit. Nulla et nunc vehicula, condimentum erat quis, elementum dolor. Quisque eu metus fermentum, vestibulum tellus at, sollicitudin odio. Ut vel neque justo. - Praesent porta porta velit, vel porttitor sem. Donec sagittis at nulla venenatis iaculis. Nullam vel eleifend felis. Nullam a pellentesque lectus. Aliquam tincidunt semper dui sed bibendum. Donec hendrerit, urna et cursus dictum, neque neque convallis magna, id condimentum sem urna quis massa. Fusce non quam vulputate, fermentum mauris at, malesuada ipsum. Mauris id pellentesque libero. Donec vel erat ullamcorper, dapibus quam id, imperdiet urna. Praesent sed ligula ut est pellentesque pharetra quis et diam. Ut placerat lorem eget mi fermentum aliquet. + Fusce nec eros eu metus tristique aliquet. This is message #""" & $i & """ sent from a publisher using mix. End of transmission.""" let message = WakuMessage( @@ -168,25 +164,34 @@ proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} timestamp: getNowInNanosecondTime(), ) # current timestamp - let res = await node.wakuLightpushClient.publishWithConn( - LightpushPubsubTopic, message, conn, dPeerId - ) + let res = + await node.wakuLightpushClient.publish(some(LightpushPubsubTopic), message, conn) - if res.isOk(): - lp_mix_success.inc() - notice "published message", - text = text, - timestamp = message.timestamp, - psTopic = LightpushPubsubTopic, - contentTopic = LightpushContentTopic - else: - error "failed to publish message", error = $res.error + let startTime = getNowInNanosecondTime() + + ( + await node.wakuLightpushClient.publishWithConn( + LightpushPubsubTopic, message, conn, dPeerId + ) + ).isOkOr: + error "failed to publish message via mix", error = error.desc lp_mix_failed.inc(labelValues = ["publish_error"]) + return + + let latency = float64(getNowInNanosecondTime() - 
startTime) / 1_000_000.0 + lp_mix_latency.observe(latency) + lp_mix_success.inc() + notice "published message", + text = text, + timestamp = message.timestamp, + latency = latency, + psTopic = LightpushPubsubTopic, + contentTopic = LightpushContentTopic if conf.mixDisabled: await conn.close() await sleepAsync(conf.msgIntervalMilliseconds) - info "###########Sent all messages via mix" + info "Sent all messages via mix" quit(0) when isMainModule: diff --git a/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim b/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim index cd06b3e3e..3c467e28c 100644 --- a/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim +++ b/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim @@ -6,3 +6,6 @@ declarePublicCounter lp_mix_success, "number of lightpush messages sent via mix" declarePublicCounter lp_mix_failed, "number of lightpush messages failed via mix", labels = ["error"] + +declarePublicHistogram lp_mix_latency, + "lightpush publish latency via mix in milliseconds" diff --git a/examples/python/waku.py b/examples/python/waku.py index 4d5f5643e..65eb5d750 100644 --- a/examples/python/waku.py +++ b/examples/python/waku.py @@ -102,8 +102,8 @@ print("Waku Relay enabled: {}".format(args.relay)) # Set the event callback callback = callback_type(handle_event) # This line is important so that the callback is not gc'ed -libwaku.waku_set_event_callback.argtypes = [callback_type, ctypes.c_void_p] -libwaku.waku_set_event_callback(callback, ctypes.c_void_p(0)) +libwaku.set_event_callback.argtypes = [callback_type, ctypes.c_void_p] +libwaku.set_event_callback(callback, ctypes.c_void_p(0)) # Start the node libwaku.waku_start.argtypes = [ctypes.c_void_p, @@ -117,32 +117,32 @@ libwaku.waku_start(ctx, # Subscribe to the default pubsub topic libwaku.waku_relay_subscribe.argtypes = [ctypes.c_void_p, - ctypes.c_char_p, callback_type, - ctypes.c_void_p] + ctypes.c_void_p, + ctypes.c_char_p] libwaku.waku_relay_subscribe(ctx, - 
default_pubsub_topic.encode('utf-8'), callback_type( #onErrCb lambda ret, msg, len: print("Error calling waku_relay_subscribe: %s" % msg.decode('utf-8')) ), - ctypes.c_void_p(0)) + ctypes.c_void_p(0), + default_pubsub_topic.encode('utf-8')) libwaku.waku_connect.argtypes = [ctypes.c_void_p, - ctypes.c_char_p, - ctypes.c_int, callback_type, - ctypes.c_void_p] + ctypes.c_void_p, + ctypes.c_char_p, + ctypes.c_int] libwaku.waku_connect(ctx, - args.peer.encode('utf-8'), - 10000, # onErrCb callback_type( lambda ret, msg, len: print("Error calling waku_connect: %s" % msg.decode('utf-8'))), - ctypes.c_void_p(0)) + ctypes.c_void_p(0), + args.peer.encode('utf-8'), + 10000) # app = Flask(__name__) # @app.route("/") diff --git a/examples/qt/waku_handler.h b/examples/qt/waku_handler.h index 161a17c82..2fb3ce3b7 100644 --- a/examples/qt/waku_handler.h +++ b/examples/qt/waku_handler.h @@ -27,7 +27,7 @@ public: void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) { ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData); - waku_set_event_callback(ctx, on_event_received, userData); + set_event_callback(ctx, on_event_received, userData); qDebug() << "Waku context initialized, ready to start."; } diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs index 926d0e3b0..d26e9627e 100644 --- a/examples/rust/src/main.rs +++ b/examples/rust/src/main.rs @@ -3,22 +3,22 @@ use std::ffi::CString; use std::os::raw::{c_char, c_int, c_void}; use std::{slice, thread, time}; -pub type WakuCallback = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void); +pub type FFICallBack = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void); extern "C" { pub fn waku_new( config_json: *const u8, - cb: WakuCallback, + cb: FFICallBack, user_data: *const c_void, ) -> *mut c_void; - pub fn waku_version(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int; + pub fn waku_version(ctx: *const c_void, 
cb: FFICallBack, user_data: *const c_void) -> c_int; - pub fn waku_start(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int; + pub fn waku_start(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int; pub fn waku_default_pubsub_topic( ctx: *mut c_void, - cb: WakuCallback, + cb: FFICallBack, user_data: *const c_void, ) -> *mut c_void; } @@ -40,7 +40,7 @@ pub unsafe extern "C" fn trampoline( closure(return_val, &buffer_utf8); } -pub fn get_trampoline(_closure: &C) -> WakuCallback +pub fn get_trampoline(_closure: &C) -> FFICallBack where C: FnMut(i32, &str), { diff --git a/examples/waku_example.nim b/examples/waku_example.nim deleted file mode 100644 index ebac0b466..000000000 --- a/examples/waku_example.nim +++ /dev/null @@ -1,40 +0,0 @@ -import std/options -import chronos, results, confutils, confutils/defs -import waku - -type CliArgs = object - ethRpcEndpoint* {. - defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled" - .}: string - -when isMainModule: - let args = CliArgs.load() - - echo "Starting Waku node..." - - let config = - if (args.ethRpcEndpoint == ""): - # Create a basic configuration for the Waku node - # No RLN as we don't have an ETH RPC Endpoint - NodeConfig.init( - protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42) - ) - else: - # Connect to TWN, use ETH RPC Endpoint for RLN - NodeConfig.init(ethRpcEndpoints = @[args.ethRpcEndpoint]) - - # Create the node using the library API's createNode function - let node = (waitFor createNode(config)).valueOr: - echo "Failed to create node: ", error - quit(QuitFailure) - - echo("Waku node created successfully!") - - # Start the node - (waitFor startWaku(addr node)).isOkOr: - echo "Failed to start node: ", error - quit(QuitFailure) - - echo "Node started successfully!" 
- - runForever() diff --git a/flake.lock b/flake.lock index 359ae2579..b927e8807 100644 --- a/flake.lock +++ b/flake.lock @@ -2,17 +2,17 @@ "nodes": { "nixpkgs": { "locked": { - "lastModified": 1740603184, - "narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=", + "lastModified": 1757590060, + "narHash": "sha256-EWwwdKLMZALkgHFyKW7rmyhxECO74+N+ZO5xTDnY/5c=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "rev": "0ef228213045d2cdb5a169a95d63ded38670b293", "type": "github" }, "original": { "owner": "NixOS", "repo": "nixpkgs", - "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "rev": "0ef228213045d2cdb5a169a95d63ded38670b293", "type": "github" } }, @@ -22,26 +22,48 @@ "zerokit": "zerokit" } }, - "zerokit": { + "rust-overlay": { "inputs": { "nixpkgs": [ + "zerokit", "nixpkgs" ] }, "locked": { - "lastModified": 1743756626, - "narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=", - "owner": "vacp2p", - "repo": "zerokit", - "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "lastModified": 1748399823, + "narHash": "sha256-kahD8D5hOXOsGbNdoLLnqCL887cjHkx98Izc37nDjlA=", + "owner": "oxalica", + "repo": "rust-overlay", + "rev": "d68a69dc71bc19beb3479800392112c2f6218159", "type": "github" }, "original": { - "owner": "vacp2p", - "repo": "zerokit", - "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "owner": "oxalica", + "repo": "rust-overlay", "type": "github" } + }, + "zerokit": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ], + "rust-overlay": "rust-overlay" + }, + "locked": { + "lastModified": 1762211504, + "narHash": "sha256-SbDoBElFYJ4cYebltxlO2lYnz6qOaDAVY6aNJ5bqHDE=", + "ref": "refs/heads/master", + "rev": "3160d9504d07791f2fc9b610948a6cf9a58ed488", + "revCount": 342, + "type": "git", + "url": "https://github.com/vacp2p/zerokit" + }, + "original": { + "rev": "3160d9504d07791f2fc9b610948a6cf9a58ed488", + "type": "git", + "url": "https://github.com/vacp2p/zerokit" + } } }, "root": "root", diff --git 
a/flake.nix b/flake.nix index 760f49337..ee24c8f13 100644 --- a/flake.nix +++ b/flake.nix @@ -1,5 +1,5 @@ { - description = "NWaku build flake"; + description = "Logos Messaging Nim build flake"; nixConfig = { extra-substituters = [ "https://nix-cache.status.im/" ]; @@ -7,9 +7,12 @@ }; inputs = { - nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49"; + # We are pinning the commit because ultimately we want to use same commit across different projects. + # A commit from nixpkgs 24.11 release : https://github.com/NixOS/nixpkgs/tree/release-24.11 + nixpkgs.url = "github:NixOS/nixpkgs/0ef228213045d2cdb5a169a95d63ded38670b293"; + # WARNING: Remember to update commit and use 'nix flake update' to update flake.lock. zerokit = { - url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582"; + url = "git+https://github.com/vacp2p/zerokit?rev=3160d9504d07791f2fc9b610948a6cf9a58ed488"; inputs.nixpkgs.follows = "nixpkgs"; }; }; @@ -49,16 +52,37 @@ libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix { inherit stableSystems; src = self; - targets = ["libwaku-android-arm64"]; - androidArch = "aarch64-linux-android"; + targets = ["libwaku-android-arm64"]; abidir = "arm64-v8a"; - zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64; + zerokitRln = zerokit.packages.${system}.rln-android-arm64; }; - default = libwaku-android-arm64; + + libwaku = pkgs.callPackage ./nix/default.nix { + inherit stableSystems; + src = self; + targets = ["libwaku"]; + zerokitRln = zerokit.packages.${system}.rln; + }; + + wakucanary = pkgs.callPackage ./nix/default.nix { + inherit stableSystems; + src = self; + targets = ["wakucanary"]; + zerokitRln = zerokit.packages.${system}.rln; + }; + + liblogosdelivery = pkgs.callPackage ./nix/default.nix { + inherit stableSystems; + src = self; + targets = ["liblogosdelivery"]; + zerokitRln = zerokit.packages.${system}.rln; + }; + + default = libwaku; }); devShells = forAllSystems (system: { default = 
pkgsFor.${system}.callPackage ./nix/shell.nix {}; }); }; -} \ No newline at end of file +} diff --git a/liblogosdelivery/BUILD.md b/liblogosdelivery/BUILD.md new file mode 100644 index 000000000..011fbb438 --- /dev/null +++ b/liblogosdelivery/BUILD.md @@ -0,0 +1,123 @@ +# Building liblogosdelivery and Examples + +## Prerequisites + +- Nim 2.x compiler +- Rust toolchain (for RLN dependencies) +- GCC or Clang compiler +- Make + +## Building the Library + +### Dynamic Library + +```bash +make liblogosdelivery +``` + +This creates `build/liblogosdelivery.dylib` (macOS) or `build/liblogosdelivery.so` (Linux). + +### Static Library + +```bash +nim liblogosdelivery STATIC=1 +``` + +This creates `build/liblogosdelivery.a`. + +## Building Examples + +### liblogosdelivery Example + +Compile the C example that demonstrates all library features: + +```bash +# Using Make (recommended) +make liblogosdelivery_example + +## Running Examples + +```bash +./build/liblogosdelivery_example +``` + +The example will: +1. Create a Logos Messaging node +2. Register event callbacks for message events +3. Start the node +4. Subscribe to a content topic +5. Send a message +6. Show message delivery events (sent, propagated, or error) +7. 
Unsubscribe and cleanup + +## Build Artifacts + +After building, you'll have: + +``` +build/ +├── liblogosdelivery.dylib # Dynamic library (34MB) +├── liblogosdelivery.dylib.dSYM/ # Debug symbols +└── liblogosdelivery_example # Compiled example (34KB) +``` + +## Library Headers + +The main header file is: +- `liblogosdelivery/liblogosdelivery.h` - C API declarations + +## Troubleshooting + +### Library not found at runtime + +If you get "library not found" errors when running the example: + +**macOS:** +```bash +export DYLD_LIBRARY_PATH=/path/to/build:$DYLD_LIBRARY_PATH +./build/liblogosdelivery_example +``` + +**Linux:** +```bash +export LD_LIBRARY_PATH=/path/to/build:$LD_LIBRARY_PATH +./build/liblogosdelivery_example +``` +## Cross-Compilation + +For cross-compilation, you need to: +1. Build the Nim library for the target platform +2. Use the appropriate cross-compiler +3. Link against the target platform's liblogosdelivery + +Example for Linux from macOS: +```bash +# Build library for Linux (requires Docker or cross-compilation setup) +# Then compile with cross-compiler +``` + +## Integration with Your Project + +### CMake + +```cmake +find_library(LMAPI_LIBRARY NAMES lmapi PATHS ${PROJECT_SOURCE_DIR}/build) +include_directories(${PROJECT_SOURCE_DIR}/liblogosdelivery) +target_link_libraries(your_target ${LMAPI_LIBRARY}) +``` + +### Makefile + +```makefile +CFLAGS += -I/path/to/liblogosdelivery +LDFLAGS += -L/path/to/build -llmapi -Wl,-rpath,/path/to/build + +your_program: your_program.c + $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS) +``` + +## API Documentation + +See: +- [liblogosdelivery.h](liblogosdelivery/liblogosdelivery.h) - API function declarations +- [MESSAGE_EVENTS.md](liblogosdelivery/MESSAGE_EVENTS.md) - Message event handling guide diff --git a/liblogosdelivery/MESSAGE_EVENTS.md b/liblogosdelivery/MESSAGE_EVENTS.md new file mode 100644 index 000000000..60740fb62 --- /dev/null +++ b/liblogosdelivery/MESSAGE_EVENTS.md @@ -0,0 +1,148 @@ +# Message Event 
Handling in LMAPI + +## Overview + +The liblogosdelivery library emits three types of message delivery events that clients can listen to by registering an event callback using `logosdelivery_set_event_callback()`. + +## Event Types + +### 1. message_sent +Emitted when a message is successfully accepted by the send service and queued for delivery. + +**JSON Structure:** +```json +{ + "eventType": "message_sent", + "requestId": "unique-request-id", + "messageHash": "0x..." +} +``` + +**Fields:** +- `eventType`: Always "message_sent" +- `requestId`: Request ID returned from the send operation +- `messageHash`: Hash of the message that was sent + +### 2. message_propagated +Emitted when a message has been successfully propagated to neighboring nodes on the network. + +**JSON Structure:** +```json +{ + "eventType": "message_propagated", + "requestId": "unique-request-id", + "messageHash": "0x..." +} +``` + +**Fields:** +- `eventType`: Always "message_propagated" +- `requestId`: Request ID from the send operation +- `messageHash`: Hash of the message that was propagated + +### 3. message_error +Emitted when an error occurs during message sending or propagation. + +**JSON Structure:** +```json +{ + "eventType": "message_error", + "requestId": "unique-request-id", + "messageHash": "0x...", + "error": "error description" +} +``` + +**Fields:** +- `eventType`: Always "message_error" +- `requestId`: Request ID from the send operation +- `messageHash`: Hash of the message that failed +- `error`: Description of what went wrong + +## Usage + +### 1. 
Define an Event Callback + +```c +void event_callback(int ret, const char *msg, size_t len, void *userData) { + if (ret != RET_OK || msg == NULL || len == 0) { + return; + } + + // Parse the JSON message + // Extract eventType field + // Handle based on event type + + if (eventType == "message_sent") { + // Handle message sent + } else if (eventType == "message_propagated") { + // Handle message propagated + } else if (eventType == "message_error") { + // Handle message error + } +} +``` + +### 2. Register the Callback + +```c +void *ctx = logosdelivery_create_node(config, callback, userData); +logosdelivery_set_event_callback(ctx, event_callback, NULL); +``` + +### 3. Start the Node + +Once the node is started, events will be delivered to your callback: + +```c +logosdelivery_start_node(ctx, callback, userData); +``` + +## Event Flow + +For a typical successful message send: + +1. **send** → Returns request ID +2. **message_sent** → Message accepted and queued +3. **message_propagated** → Message delivered to peers + +For a failed message send: + +1. **send** → Returns request ID +2. **message_sent** → Message accepted and queued +3. **message_error** → Delivery failed with error description + +## Important Notes + +1. **Thread Safety**: The event callback is invoked from the FFI worker thread. Ensure your callback is thread-safe if it accesses shared state. + +2. **Non-Blocking**: Keep the callback fast and non-blocking. Do not perform long-running operations in the callback. + +3. **JSON Parsing**: The example uses a simple string-based parser. For production, use a proper JSON library like: + - [cJSON](https://github.com/DaveGamble/cJSON) + - [json-c](https://github.com/json-c/json-c) + - [Jansson](https://github.com/akheron/jansson) + +4. **Memory Management**: The message buffer is owned by the library. Copy any data you need to retain. + +5. **Event Order**: Events are delivered in the order they occur, but timing depends on network conditions. 
+ +## Example Implementation + +See `examples/liblogosdelivery_example.c` for a complete working example that: +- Registers an event callback +- Sends a message +- Receives and prints all three event types +- Properly parses the JSON event structure + +## Debugging Events + +To see all events during development: + +```c +void debug_event_callback(int ret, const char *msg, size_t len, void *userData) { + printf("Event received: %.*s\n", (int)len, msg); +} +``` + +This will print the raw JSON for all events, helping you understand the event structure. diff --git a/liblogosdelivery/README.md b/liblogosdelivery/README.md new file mode 100644 index 000000000..e8352c611 --- /dev/null +++ b/liblogosdelivery/README.md @@ -0,0 +1,262 @@ +# Logos Messaging API (LMAPI) Library + +A C FFI library providing a simplified interface to Logos Messaging functionality. + +## Overview + +This library wraps the high-level API functions from `waku/api/api.nim` and exposes them via a C FFI interface, making them accessible from C, C++, and other languages that support C FFI. + +## API Functions + +### Node Lifecycle + +#### `logosdelivery_create_node` +Creates a new instance of the node from the given configuration JSON. + +```c +void *logosdelivery_create_node( + const char *configJson, + FFICallBack callback, + void *userData +); +``` + +**Parameters:** +- `configJson`: JSON string containing node configuration +- `callback`: Callback function to receive the result +- `userData`: User data passed to the callback + +**Returns:** Pointer to the context needed by other API functions, or NULL on error. + +**Example configuration JSON:** +```json +{ + "mode": "Core", + "preset": "logos.dev", + "listenAddress": "0.0.0.0", + "tcpPort": 60000, + "discv5UdpPort": 9000 +} +``` + +Configuration uses flat field names matching `WakuNodeConf` in `tools/confutils/cli_args.nim`. 
+Use `"preset"` to select a network preset (e.g., `"twn"`, `"logos.dev"`) which auto-configures +entry nodes, cluster ID, sharding, and other network-specific settings. + +#### `logosdelivery_start_node` +Starts the node. + +```c +int logosdelivery_start_node( + void *ctx, + FFICallBack callback, + void *userData +); +``` + +#### `logosdelivery_stop_node` +Stops the node. + +```c +int logosdelivery_stop_node( + void *ctx, + FFICallBack callback, + void *userData +); +``` + +#### `logosdelivery_destroy` +Destroys a node instance and frees resources. + +```c +int logosdelivery_destroy( + void *ctx, + FFICallBack callback, + void *userData +); +``` + +### Messaging + +#### `logosdelivery_subscribe` +Subscribe to a content topic to receive messages. + +```c +int logosdelivery_subscribe( + void *ctx, + FFICallBack callback, + void *userData, + const char *contentTopic +); +``` + +**Parameters:** +- `ctx`: Context pointer from `logosdelivery_create_node` +- `callback`: Callback function to receive the result +- `userData`: User data passed to the callback +- `contentTopic`: Content topic string (e.g., "/myapp/1/chat/proto") + +#### `logosdelivery_unsubscribe` +Unsubscribe from a content topic. + +```c +int logosdelivery_unsubscribe( + void *ctx, + FFICallBack callback, + void *userData, + const char *contentTopic +); +``` + +#### `logosdelivery_send` +Send a message. + +```c +int logosdelivery_send( + void *ctx, + FFICallBack callback, + void *userData, + const char *messageJson +); +``` + +**Parameters:** +- `messageJson`: JSON string containing the message + +**Example message JSON:** +```json +{ + "contentTopic": "/myapp/1/chat/proto", + "payload": "SGVsbG8gV29ybGQ=", + "ephemeral": false +} +``` + +Note: The `payload` field should be base64-encoded. + +**Returns:** Request ID in the callback message that can be used to track message delivery. 
+ +### Events + +#### `logosdelivery_set_event_callback` +Sets a callback that will be invoked whenever an event occurs (e.g., message received). + +```c +void logosdelivery_set_event_callback( + void *ctx, + FFICallBack callback, + void *userData +); +``` + +**Important:** The callback should be fast, non-blocking, and thread-safe. + +## Building + +The library follows the same build system as the main Logos Messaging project. + +### Build the library + +```bash +make liblogosdeliveryStatic # Build static library +# or +make liblogosdeliveryDynamic # Build dynamic library +``` + +## Return Codes + +All functions that return `int` use the following return codes: + +- `RET_OK` (0): Success +- `RET_ERR` (1): Error +- `RET_MISSING_CALLBACK` (2): Missing callback function + +## Callback Function + +All API functions use the following callback signature: + +```c +typedef void (*FFICallBack)( + int callerRet, + const char *msg, + size_t len, + void *userData +); +``` + +**Parameters:** +- `callerRet`: Return code (RET_OK, RET_ERR, etc.) 
+- `msg`: Response message (may be empty for success) +- `len`: Length of the message +- `userData`: User data passed in the original call + +## Example Usage + +```c +#include "liblogosdelivery.h" +#include + +void callback(int ret, const char *msg, size_t len, void *userData) { + if (ret == RET_OK) { + printf("Success: %.*s\n", (int)len, msg); + } else { + printf("Error: %.*s\n", (int)len, msg); + } +} + +int main() { + const char *config = "{" + "\"logLevel\": \"INFO\"," + "\"mode\": \"Core\"," + "\"preset\": \"logos.dev\"" + "}"; + + // Create node + void *ctx = logosdelivery_create_node(config, callback, NULL); + if (ctx == NULL) { + return 1; + } + + // Start node + logosdelivery_start_node(ctx, callback, NULL); + + // Subscribe to a topic + logosdelivery_subscribe(ctx, callback, NULL, "/myapp/1/chat/proto"); + + // Send a message + const char *msg = "{" + "\"contentTopic\": \"/myapp/1/chat/proto\"," + "\"payload\": \"SGVsbG8gV29ybGQ=\"," + "\"ephemeral\": false" + "}"; + logosdelivery_send(ctx, callback, NULL, msg); + + // Clean up + logosdelivery_stop_node(ctx, callback, NULL); + logosdelivery_destroy(ctx, callback, NULL); + + return 0; +} +``` + +## Architecture + +The library is structured as follows: + +- `liblogosdelivery.h`: C header file with function declarations +- `liblogosdelivery.nim`: Main library entry point +- `declare_lib.nim`: Library declaration and initialization +- `lmapi/node_api.nim`: Node lifecycle API implementation +- `lmapi/messaging_api.nim`: Subscribe/send API implementation + +The library uses the nim-ffi framework for FFI infrastructure, which handles: +- Thread-safe request processing +- Async operation management +- Memory management between C and Nim +- Callback marshaling + +## See Also + +- Main API documentation: `waku/api/api.nim` +- Original libwaku library: `library/libwaku.nim` +- nim-ffi framework: `vendor/nim-ffi/` diff --git a/liblogosdelivery/declare_lib.nim b/liblogosdelivery/declare_lib.nim new file mode 100644 
index 000000000..5087a0dee --- /dev/null +++ b/liblogosdelivery/declare_lib.nim @@ -0,0 +1,33 @@ +import ffi +import std/locks +import waku/factory/waku + +declareLibrary("logosdelivery") + +var eventCallbackLock: Lock +initLock(eventCallbackLock) + +template requireInitializedNode*( + ctx: ptr FFIContext[Waku], opName: string, onError: untyped +) = + if isNil(ctx): + let errMsg {.inject.} = opName & " failed: invalid context" + onError + elif isNil(ctx.myLib) or isNil(ctx.myLib[]): + let errMsg {.inject.} = opName & " failed: node is not initialized" + onError + +proc logosdelivery_set_event_callback( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.dynlib, exportc, cdecl.} = + if isNil(ctx): + echo "error: invalid context in logosdelivery_set_event_callback" + return + + # prevent race conditions that might happen due incorrect usage. + eventCallbackLock.acquire() + defer: + eventCallbackLock.release() + + ctx[].eventCallback = cast[pointer](callback) + ctx[].eventUserData = userData diff --git a/liblogosdelivery/examples/json_utils.c b/liblogosdelivery/examples/json_utils.c new file mode 100644 index 000000000..8b33bb648 --- /dev/null +++ b/liblogosdelivery/examples/json_utils.c @@ -0,0 +1,96 @@ +#include "json_utils.h" +#include +#include + +const char* extract_json_field(const char *json, const char *field, char *buffer, size_t bufSize) { + char searchStr[256]; + snprintf(searchStr, sizeof(searchStr), "\"%s\":\"", field); + + const char *start = strstr(json, searchStr); + if (!start) { + return NULL; + } + + start += strlen(searchStr); + const char *end = strchr(start, '"'); + if (!end) { + return NULL; + } + + size_t len = end - start; + if (len >= bufSize) { + len = bufSize - 1; + } + + memcpy(buffer, start, len); + buffer[len] = '\0'; + + return buffer; +} + +const char* extract_json_object(const char *json, const char *field, size_t *outLen) { + char searchStr[256]; + snprintf(searchStr, sizeof(searchStr), "\"%s\":{", field); + + 
const char *start = strstr(json, searchStr); + if (!start) { + return NULL; + } + + // Advance to the opening brace + start = strchr(start, '{'); + if (!start) { + return NULL; + } + + // Find the matching closing brace (handles nested braces) + int depth = 0; + const char *p = start; + while (*p) { + if (*p == '{') depth++; + else if (*p == '}') { + depth--; + if (depth == 0) { + *outLen = (size_t)(p - start + 1); + return start; + } + } + p++; + } + return NULL; +} + +int decode_json_byte_array(const char *json, const char *field, char *buffer, size_t bufSize) { + char searchStr[256]; + snprintf(searchStr, sizeof(searchStr), "\"%s\":[", field); + + const char *start = strstr(json, searchStr); + if (!start) { + return -1; + } + + // Advance to the opening bracket + start = strchr(start, '['); + if (!start) { + return -1; + } + start++; // skip '[' + + size_t pos = 0; + const char *p = start; + while (*p && *p != ']' && pos < bufSize - 1) { + // Skip whitespace and commas + while (*p == ' ' || *p == ',' || *p == '\n' || *p == '\r' || *p == '\t') p++; + if (*p == ']') break; + + // Parse integer + int val = 0; + while (*p >= '0' && *p <= '9') { + val = val * 10 + (*p - '0'); + p++; + } + buffer[pos++] = (char)val; + } + buffer[pos] = '\0'; + return (int)pos; +} diff --git a/liblogosdelivery/examples/json_utils.h b/liblogosdelivery/examples/json_utils.h new file mode 100644 index 000000000..4039ca4f6 --- /dev/null +++ b/liblogosdelivery/examples/json_utils.h @@ -0,0 +1,21 @@ +#ifndef JSON_UTILS_H +#define JSON_UTILS_H + +#include + +// Extract a JSON string field value into buffer. +// Returns pointer to buffer on success, NULL on failure. +// Very basic parser - for production use a proper JSON library. +const char* extract_json_field(const char *json, const char *field, char *buffer, size_t bufSize); + +// Extract a nested JSON object as a raw string. +// Returns a pointer into `json` at the start of the object, and sets `outLen`. +// Handles nested braces. 
+const char* extract_json_object(const char *json, const char *field, size_t *outLen); + +// Decode a JSON array of integers (byte values) into a buffer. +// Parses e.g. [72,101,108,108,111] into "Hello". +// Returns number of bytes decoded, or -1 on error. +int decode_json_byte_array(const char *json, const char *field, char *buffer, size_t bufSize); + +#endif // JSON_UTILS_H diff --git a/liblogosdelivery/examples/logosdelivery_example.c b/liblogosdelivery/examples/logosdelivery_example.c new file mode 100644 index 000000000..729f7f0dc --- /dev/null +++ b/liblogosdelivery/examples/logosdelivery_example.c @@ -0,0 +1,227 @@ +#include "../liblogosdelivery.h" +#include "json_utils.h" +#include +#include +#include +#include + +static int create_node_ok = -1; + +// Flags set by event callback, polled by main thread +static volatile int got_message_sent = 0; +static volatile int got_message_error = 0; +static volatile int got_message_received = 0; + +// Event callback that handles message events +void event_callback(int ret, const char *msg, size_t len, void *userData) { + if (ret != RET_OK || msg == NULL || len == 0) { + return; + } + + // Create null-terminated string for easier parsing + char *eventJson = malloc(len + 1); + if (!eventJson) { + return; + } + memcpy(eventJson, msg, len); + eventJson[len] = '\0'; + + // Extract eventType + char eventType[64]; + if (!extract_json_field(eventJson, "eventType", eventType, sizeof(eventType))) { + free(eventJson); + return; + } + + // Handle different event types + if (strcmp(eventType, "message_sent") == 0) { + char requestId[128]; + char messageHash[128]; + extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); + extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); + printf("[EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash); + got_message_sent = 1; + + } else if (strcmp(eventType, "message_error") == 0) { + char requestId[128]; + char messageHash[128]; + 
char error[256]; + extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); + extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); + extract_json_field(eventJson, "error", error, sizeof(error)); + printf("[EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n", + requestId, messageHash, error); + got_message_error = 1; + + } else if (strcmp(eventType, "message_propagated") == 0) { + char requestId[128]; + char messageHash[128]; + extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); + extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); + printf("[EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash); + + } else if (strcmp(eventType, "connection_status_change") == 0) { + char connectionStatus[256]; + extract_json_field(eventJson, "connectionStatus", connectionStatus, sizeof(connectionStatus)); + printf("[EVENT] Connection status change - Status: %s\n", connectionStatus); + + } else if (strcmp(eventType, "message_received") == 0) { + char messageHash[128]; + extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); + + // Extract the nested "message" object + size_t msgObjLen = 0; + const char *msgObj = extract_json_object(eventJson, "message", &msgObjLen); + if (msgObj) { + // Make a null-terminated copy of the message object + char *msgJson = malloc(msgObjLen + 1); + if (msgJson) { + memcpy(msgJson, msgObj, msgObjLen); + msgJson[msgObjLen] = '\0'; + + char contentTopic[256]; + extract_json_field(msgJson, "contentTopic", contentTopic, sizeof(contentTopic)); + + // Decode payload from JSON byte array to string + char payload[4096]; + int payloadLen = decode_json_byte_array(msgJson, "payload", payload, sizeof(payload)); + + printf("[EVENT] Message received - Hash: %s, ContentTopic: %s\n", messageHash, contentTopic); + if (payloadLen > 0) { + printf(" Payload (%d bytes): %.*s\n", payloadLen, payloadLen, payload); + } else { + 
printf(" Payload: (empty or could not decode)\n"); + } + + free(msgJson); + } + } else { + printf("[EVENT] Message received - Hash: %s (could not parse message)\n", messageHash); + } + got_message_received = 1; + + } else { + printf("[EVENT] Unknown event type: %s\n", eventType); + } + + free(eventJson); +} + +// Simple callback that prints results +void simple_callback(int ret, const char *msg, size_t len, void *userData) { + const char *operation = (const char *)userData; + + if (operation != NULL && strcmp(operation, "create_node") == 0) { + create_node_ok = (ret == RET_OK) ? 1 : 0; + } + + if (ret == RET_OK) { + if (len > 0) { + printf("[%s] Success: %.*s\n", operation, (int)len, msg); + } else { + printf("[%s] Success\n", operation); + } + } else { + printf("[%s] Error: %.*s\n", operation, (int)len, msg); + } +} + +int main() { + printf("=== Logos Messaging API (LMAPI) Example ===\n\n"); + + // Configuration JSON using WakuNodeConf field names (flat structure). + // Field names match Nim identifiers from WakuNodeConf in tools/confutils/cli_args.nim. + const char *config = "{" + "\"logLevel\": \"INFO\"," + "\"mode\": \"Core\"," + "\"preset\": \"logos.dev\"" + "}"; + + printf("1. Creating node...\n"); + void *ctx = logosdelivery_create_node(config, simple_callback, (void *)"create_node"); + if (ctx == NULL) { + printf("Failed to create node\n"); + return 1; + } + + // Wait a bit for the callback + sleep(1); + + if (create_node_ok != 1) { + printf("Create node failed, stopping example early.\n"); + logosdelivery_destroy(ctx, simple_callback, (void *)"destroy"); + return 1; + } + + printf("\n2. Setting up event callback...\n"); + logosdelivery_set_event_callback(ctx, event_callback, NULL); + printf("Event callback registered for message events\n"); + + printf("\n3. Starting node...\n"); + logosdelivery_start_node(ctx, simple_callback, (void *)"start_node"); + + // Wait for node to start + sleep(5); + + printf("\n4. 
Subscribing to content topic...\n");
+  const char *contentTopic = "/example/1/chat/proto";
+  logosdelivery_subscribe(ctx, simple_callback, (void *)"subscribe", contentTopic);
+
+  // Wait for subscription
+  sleep(1);
+
+  printf("\n5. Retrieving all possible node info ids...\n");
+  logosdelivery_get_available_node_info_ids(ctx, simple_callback, (void *)"get_available_node_info_ids");
+
+  printf("\nRetrieving node info for a specific invalid ID...\n");
+  logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "WrongNodeInfoId");
+
+  printf("\nRetrieving several node info for specific correct IDs...\n");
+  logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "Version");
+  // logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "Metrics");
+  logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "MyMultiaddresses");
+  logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "MyENR");
+  logosdelivery_get_node_info(ctx, simple_callback, (void *)"get_node_info", "MyPeerId");
+
+  printf("\nRetrieving available configs...\n");
+  logosdelivery_get_available_configs(ctx, simple_callback, (void *)"get_available_configs");
+
+  printf("\n6. Sending a message...\n");
+  printf("Watch for message events (sent, propagated, or error):\n");
+  // Create base64-encoded payload: "Hello, Logos Messaging!" 
+ const char *message = "{" + "\"contentTopic\": \"/example/1/chat/proto\"," + "\"payload\": \"SGVsbG8sIExvZ29zIE1lc3NhZ2luZyE=\"," + "\"ephemeral\": false" + "}"; + logosdelivery_send(ctx, simple_callback, (void *)"send", message); + + // Poll for terminal message events (sent, error, or received) with timeout + printf("Waiting for message delivery events...\n"); + int timeout_sec = 60; + int elapsed = 0; + while (!(got_message_sent || got_message_error || got_message_received) + && elapsed < timeout_sec) { + usleep(100000); // 100ms + elapsed++; + } + if (elapsed >= timeout_sec) { + printf("Timed out waiting for message events after %d seconds\n", timeout_sec); + } + + printf("\n7. Unsubscribing from content topic...\n"); + logosdelivery_unsubscribe(ctx, simple_callback, (void *)"unsubscribe", contentTopic); + + sleep(1); + + printf("\n8. Stopping node...\n"); + logosdelivery_stop_node(ctx, simple_callback, (void *)"stop_node"); + + sleep(1); + + printf("\n9. Destroying context...\n"); + logosdelivery_destroy(ctx, simple_callback, (void *)"destroy"); + + printf("\n=== Example completed ===\n"); + return 0; +} diff --git a/liblogosdelivery/json_event.nim b/liblogosdelivery/json_event.nim new file mode 100644 index 000000000..389e29120 --- /dev/null +++ b/liblogosdelivery/json_event.nim @@ -0,0 +1,27 @@ +import std/[json, macros] + +type JsonEvent*[T] = ref object + eventType*: string + payload*: T + +macro toFlatJson*(event: JsonEvent): JsonNode = + ## Serializes JsonEvent[T] to flat JSON with eventType first, + ## followed by all fields from T's payload + result = quote: + var jsonObj = newJObject() + jsonObj["eventType"] = %`event`.eventType + + # Serialize payload fields into the same object (flattening) + let payloadJson = %`event`.payload + for key, val in payloadJson.pairs: + jsonObj[key] = val + + jsonObj + +proc `$`*[T](event: JsonEvent[T]): string = + $toFlatJson(event) + +proc newJsonEvent*[T](eventType: string, payload: T): JsonEvent[T] = + ## Creates a 
new JsonEvent with the given eventType and payload. + ## The payload's fields will be flattened into the JSON output. + JsonEvent[T](eventType: eventType, payload: payload) diff --git a/liblogosdelivery/liblogosdelivery.h b/liblogosdelivery/liblogosdelivery.h new file mode 100644 index 000000000..5092db9f2 --- /dev/null +++ b/liblogosdelivery/liblogosdelivery.h @@ -0,0 +1,100 @@ + +// Generated manually and inspired by libwaku.h +// Header file for Logos Messaging API (LMAPI) library +#pragma once +#ifndef __liblogosdelivery__ +#define __liblogosdelivery__ + +#include +#include + +// The possible returned values for the functions that return int +#define RET_OK 0 +#define RET_ERR 1 +#define RET_MISSING_CALLBACK 2 + +#ifdef __cplusplus +extern "C" +{ +#endif + + typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData); + + // Creates a new instance of the node from the given configuration JSON. + // Returns a pointer to the Context needed by the rest of the API functions. + // Configuration should be in JSON format using WakuNodeConf field names. + // Field names match Nim identifiers from WakuNodeConf (camelCase). + // Example: {"mode": "Core", "clusterId": 42, "relay": true} + void *logosdelivery_create_node( + const char *configJson, + FFICallBack callback, + void *userData); + + // Starts the node. + int logosdelivery_start_node(void *ctx, + FFICallBack callback, + void *userData); + + // Stops the node. + int logosdelivery_stop_node(void *ctx, + FFICallBack callback, + void *userData); + + // Destroys an instance of a node created with logosdelivery_create_node + int logosdelivery_destroy(void *ctx, + FFICallBack callback, + void *userData); + + // Subscribe to a content topic. + // contentTopic: string representing the content topic (e.g., "/myapp/1/chat/proto") + int logosdelivery_subscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *contentTopic); + + // Unsubscribe from a content topic. 
+ int logosdelivery_unsubscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *contentTopic); + + // Send a message. + // messageJson: JSON string with the following structure: + // { + // "contentTopic": "/myapp/1/chat/proto", + // "payload": "base64-encoded-payload", + // "ephemeral": false + // } + // Returns a request ID that can be used to track the message delivery. + int logosdelivery_send(void *ctx, + FFICallBack callback, + void *userData, + const char *messageJson); + + // Sets a callback that will be invoked whenever an event occurs. + // It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. + void logosdelivery_set_event_callback(void *ctx, + FFICallBack callback, + void *userData); + + // Retrieves the list of available node info IDs. + int logosdelivery_get_available_node_info_ids(void *ctx, + FFICallBack callback, + void *userData); + + // Given a node info ID, retrieves the corresponding info. + int logosdelivery_get_node_info(void *ctx, + FFICallBack callback, + void *userData, + const char *nodeInfoId); + + // Retrieves the list of available configurations. + int logosdelivery_get_available_configs(void *ctx, + FFICallBack callback, + void *userData); + +#ifdef __cplusplus +} +#endif + +#endif /* __liblogosdelivery__ */ diff --git a/liblogosdelivery/liblogosdelivery.nim b/liblogosdelivery/liblogosdelivery.nim new file mode 100644 index 000000000..fc907498a --- /dev/null +++ b/liblogosdelivery/liblogosdelivery.nim @@ -0,0 +1,11 @@ +import std/[atomics, options] +import chronicles, chronos, chronos/threadsync, ffi +import waku/factory/waku, waku/node/waku_node, ./declare_lib + +################################################################################ +## Include different APIs, i.e. 
all procs with {.ffi.} pragma + +include + ./logos_delivery_api/node_api, + ./logos_delivery_api/messaging_api, + ./logos_delivery_api/debug_api diff --git a/liblogosdelivery/logos_delivery_api/debug_api.nim b/liblogosdelivery/logos_delivery_api/debug_api.nim new file mode 100644 index 000000000..623b3b08f --- /dev/null +++ b/liblogosdelivery/logos_delivery_api/debug_api.nim @@ -0,0 +1,54 @@ +import std/[json, strutils] +import waku/factory/waku_state_info +import tools/confutils/[cli_args, config_option_meta] + +proc logosdelivery_get_available_node_info_ids( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## Returns the list of all available node info item ids that + ## can be queried with `get_node_info_item`. + requireInitializedNode(ctx, "GetNodeInfoIds"): + return err(errMsg) + + return ok($ctx.myLib[].stateInfo.getAllPossibleInfoItemIds()) + +proc logosdelivery_get_node_info( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + nodeInfoId: cstring, +) {.ffi.} = + ## Returns the content of the node info item with the given id if it exists. + requireInitializedNode(ctx, "GetNodeInfoItem"): + return err(errMsg) + + let infoItemIdEnum = + try: + parseEnum[NodeInfoId]($nodeInfoId) + except ValueError: + return err("Invalid node info id: " & $nodeInfoId) + + return ok(ctx.myLib[].stateInfo.getNodeInfoItem(infoItemIdEnum)) + +proc logosdelivery_get_available_configs( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## Returns information about the accepted config items. 
+ requireInitializedNode(ctx, "GetAvailableConfigs"): + return err(errMsg) + + let optionMetas: seq[ConfigOptionMeta] = extractConfigOptionMeta(WakuNodeConf) + var configOptionDetails = newJArray() + + # for confField, confValue in fieldPairs(conf): + # defaultConfig[confField] = $confValue + + for meta in optionMetas: + configOptionDetails.add( + %*{meta.fieldName: meta.typeName & "(" & meta.defaultValue & ")", "desc": meta.desc} + ) + + var jsonNode = newJObject() + jsonNode["configOptions"] = configOptionDetails + let asString = pretty(jsonNode) + return ok(pretty(jsonNode)) diff --git a/liblogosdelivery/logos_delivery_api/messaging_api.nim b/liblogosdelivery/logos_delivery_api/messaging_api.nim new file mode 100644 index 000000000..cb2771034 --- /dev/null +++ b/liblogosdelivery/logos_delivery_api/messaging_api.nim @@ -0,0 +1,91 @@ +import std/[json] +import chronos, results, ffi +import stew/byteutils +import + waku/common/base64, + waku/factory/waku, + waku/waku_core/topics/content_topic, + waku/api/[api, types], + ../declare_lib + +proc logosdelivery_subscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + contentTopicStr: cstring, +) {.ffi.} = + requireInitializedNode(ctx, "Subscribe"): + return err(errMsg) + + # ContentTopic is just a string type alias + let contentTopic = ContentTopic($contentTopicStr) + + (await api.subscribe(ctx.myLib[], contentTopic)).isOkOr: + let errMsg = $error + return err("Subscribe failed: " & errMsg) + + return ok("") + +proc logosdelivery_unsubscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + contentTopicStr: cstring, +) {.ffi.} = + requireInitializedNode(ctx, "Unsubscribe"): + return err(errMsg) + + # ContentTopic is just a string type alias + let contentTopic = ContentTopic($contentTopicStr) + + api.unsubscribe(ctx.myLib[], contentTopic).isOkOr: + let errMsg = $error + return err("Unsubscribe failed: " & errMsg) + + return ok("") + +proc logosdelivery_send( + 
ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + messageJson: cstring, +) {.ffi.} = + requireInitializedNode(ctx, "Send"): + return err(errMsg) + + ## Parse the message JSON and send the message + var jsonNode: JsonNode + try: + jsonNode = parseJson($messageJson) + except Exception as e: + return err("Failed to parse message JSON: " & e.msg) + + # Extract content topic + if not jsonNode.hasKey("contentTopic"): + return err("Missing contentTopic field") + + # ContentTopic is just a string type alias + let contentTopic = ContentTopic(jsonNode["contentTopic"].getStr()) + + # Extract payload (expect base64 encoded string) + if not jsonNode.hasKey("payload"): + return err("Missing payload field") + + let payloadStr = jsonNode["payload"].getStr() + let payload = base64.decode(Base64String(payloadStr)).valueOr: + return err("invalid payload format: " & error) + + # Extract ephemeral flag + let ephemeral = jsonNode.getOrDefault("ephemeral").getBool(false) + + # Create message envelope + let envelope = MessageEnvelope.init( + contentTopic = contentTopic, payload = payload, ephemeral = ephemeral + ) + + # Send the message + let requestId = (await api.send(ctx.myLib[], envelope)).valueOr: + let errMsg = $error + return err("Send failed: " & errMsg) + + return ok($requestId) diff --git a/liblogosdelivery/logos_delivery_api/node_api.nim b/liblogosdelivery/logos_delivery_api/node_api.nim new file mode 100644 index 000000000..cd644abd7 --- /dev/null +++ b/liblogosdelivery/logos_delivery_api/node_api.nim @@ -0,0 +1,169 @@ +import std/[json, strutils] +import chronos, chronicles, results, confutils, confutils/std/net, ffi +import + waku/factory/waku, + waku/node/waku_node, + waku/api/[api, types], + waku/events/[message_events, health_events], + tools/confutils/cli_args, + ../declare_lib, + ../json_event + +# Add JSON serialization for RequestId +proc `%`*(id: RequestId): JsonNode = + %($id) + +registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]): 
+ proc(configJson: cstring): Future[Result[string, string]] {.async.} = + ## Parse the JSON configuration using fieldPairs approach (WakuNodeConf) + var conf = defaultWakuNodeConf().valueOr: + return err("Failed creating default conf: " & error) + + var jsonNode: JsonNode + try: + jsonNode = parseJson($configJson) + except Exception: + return err( + "Failed to parse config JSON: " & getCurrentExceptionMsg() & + " configJson string: " & $configJson + ) + + for confField, confValue in fieldPairs(conf): + if jsonNode.contains(confField): + let formattedString = ($jsonNode[confField]).strip(chars = {'\"'}) + try: + confValue = parseCmdArg(typeof(confValue), formattedString) + except Exception: + return err( + "Failed to parse field '" & confField & "': " & getCurrentExceptionMsg() & + ". Value: " & formattedString + ) + + # Create the node + ctx.myLib[] = (await api.createNode(conf)).valueOr: + let errMsg = $error + chronicles.error "CreateNodeRequest failed", err = errMsg + return err(errMsg) + + return ok("") + +proc logosdelivery_destroy( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +): cint {.dynlib, exportc, cdecl.} = + initializeLibrary() + checkParams(ctx, callback, userData) + + ffi.destroyFFIContext(ctx).isOkOr: + let msg = "liblogosdelivery error: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + ## always need to invoke the callback although we don't retrieve value to the caller + callback(RET_OK, nil, 0, userData) + + return RET_OK + +proc logosdelivery_create_node( + configJson: cstring, callback: FFICallback, userData: pointer +): pointer {.dynlib, exportc, cdecl.} = + initializeLibrary() + + if isNil(callback): + echo "error: missing callback in logosdelivery_create_node" + return nil + + var ctx = ffi.createFFIContext[Waku]().valueOr: + let msg = "Error in createFFIContext: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + + 
ctx.userData = userData + + ffi.sendRequestToFFIThread( + ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson) + ).isOkOr: + let msg = "error in sendRequestToFFIThread: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + # free allocated resources as they won't be available + ffi.destroyFFIContext(ctx).isOkOr: + chronicles.error "Error in destroyFFIContext after sendRequestToFFIThread during creation", + err = $error + return nil + + return ctx + +proc logosdelivery_start_node( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + requireInitializedNode(ctx, "START_NODE"): + return err(errMsg) + + # setting up outgoing event listeners + let sentListener = MessageSentEvent.listen( + ctx.myLib[].brokerCtx, + proc(event: MessageSentEvent) {.async: (raises: []).} = + callEventCallback(ctx, "onMessageSent"): + $newJsonEvent("message_sent", event), + ).valueOr: + chronicles.error "MessageSentEvent.listen failed", err = $error + return err("MessageSentEvent.listen failed: " & $error) + + let errorListener = MessageErrorEvent.listen( + ctx.myLib[].brokerCtx, + proc(event: MessageErrorEvent) {.async: (raises: []).} = + callEventCallback(ctx, "onMessageError"): + $newJsonEvent("message_error", event), + ).valueOr: + chronicles.error "MessageErrorEvent.listen failed", err = $error + return err("MessageErrorEvent.listen failed: " & $error) + + let propagatedListener = MessagePropagatedEvent.listen( + ctx.myLib[].brokerCtx, + proc(event: MessagePropagatedEvent) {.async: (raises: []).} = + callEventCallback(ctx, "onMessagePropagated"): + $newJsonEvent("message_propagated", event), + ).valueOr: + chronicles.error "MessagePropagatedEvent.listen failed", err = $error + return err("MessagePropagatedEvent.listen failed: " & $error) + + let receivedListener = MessageReceivedEvent.listen( + ctx.myLib[].brokerCtx, + proc(event: MessageReceivedEvent) {.async: (raises: []).} = + callEventCallback(ctx, 
"onMessageReceived"): + $newJsonEvent("message_received", event), + ).valueOr: + chronicles.error "MessageReceivedEvent.listen failed", err = $error + return err("MessageReceivedEvent.listen failed: " & $error) + + let ConnectionStatusChangeListener = EventConnectionStatusChange.listen( + ctx.myLib[].brokerCtx, + proc(event: EventConnectionStatusChange) {.async: (raises: []).} = + callEventCallback(ctx, "onConnectionStatusChange"): + $newJsonEvent("connection_status_change", event), + ).valueOr: + chronicles.error "ConnectionStatusChange.listen failed", err = $error + return err("ConnectionStatusChange.listen failed: " & $error) + + (await startWaku(addr ctx.myLib[])).isOkOr: + let errMsg = $error + chronicles.error "START_NODE failed", err = errMsg + return err("failed to start: " & errMsg) + return ok("") + +proc logosdelivery_stop_node( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + requireInitializedNode(ctx, "STOP_NODE"): + return err(errMsg) + + MessageErrorEvent.dropAllListeners(ctx.myLib[].brokerCtx) + MessageSentEvent.dropAllListeners(ctx.myLib[].brokerCtx) + MessagePropagatedEvent.dropAllListeners(ctx.myLib[].brokerCtx) + MessageReceivedEvent.dropAllListeners(ctx.myLib[].brokerCtx) + EventConnectionStatusChange.dropAllListeners(ctx.myLib[].brokerCtx) + + (await ctx.myLib[].stop()).isOkOr: + let errMsg = $error + chronicles.error "STOP_NODE failed", err = errMsg + return err("failed to stop: " & errMsg) + return ok("") diff --git a/liblogosdelivery/nim.cfg b/liblogosdelivery/nim.cfg new file mode 100644 index 000000000..3fd5adb32 --- /dev/null +++ b/liblogosdelivery/nim.cfg @@ -0,0 +1,27 @@ +# Nim configuration for liblogosdelivery + +# Ensure correct compiler configuration +--gc: + refc +--threads: + on + +# Include paths +--path: + "../vendor/nim-ffi" +--path: + "../" + +# Optimization and debugging +--opt: + speed +--debugger: + native + +# Export symbols for dynamic library +--app: + lib +--noMain + +# Enable FFI 
macro features when needed for debugging +# --define:ffiDumpMacros diff --git a/library/alloc.nim b/library/alloc.nim deleted file mode 100644 index 1a6f118b5..000000000 --- a/library/alloc.nim +++ /dev/null @@ -1,42 +0,0 @@ -## Can be shared safely between threads -type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int] - -proc alloc*(str: cstring): cstring = - # Byte allocation from the given address. - # There should be the corresponding manual deallocation with deallocShared ! - if str.isNil(): - var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator - ret[0] = '\0' # Set the null terminator - return ret - - let ret = cast[cstring](allocShared(len(str) + 1)) - copyMem(ret, str, len(str) + 1) - return ret - -proc alloc*(str: string): cstring = - ## Byte allocation from the given address. - ## There should be the corresponding manual deallocation with deallocShared ! - var ret = cast[cstring](allocShared(str.len + 1)) - let s = cast[seq[char]](str) - for i in 0 ..< str.len: - ret[i] = s[i] - ret[str.len] = '\0' - return ret - -proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] = - let data = allocShared(sizeof(T) * s.len) - if s.len != 0: - copyMem(data, unsafeAddr s[0], s.len) - return (cast[ptr UncheckedArray[T]](data), s.len) - -proc deallocSharedSeq*[T](s: var SharedSeq[T]) = - deallocShared(s.data) - s.len = 0 - -proc toSeq*[T](s: SharedSeq[T]): seq[T] = - ## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required - ## as req[T] is a GC managed type. 
- var ret = newSeq[T]() - for i in 0 ..< s.len: - ret.add(s.data[i]) - return ret diff --git a/library/declare_lib.nim b/library/declare_lib.nim new file mode 100644 index 000000000..188de8549 --- /dev/null +++ b/library/declare_lib.nim @@ -0,0 +1,10 @@ +import ffi +import waku/factory/waku + +declareLibrary("waku") + +proc set_event_callback( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.dynlib, exportc, cdecl.} = + ctx[].eventCallback = cast[pointer](callback) + ctx[].eventUserData = userData diff --git a/library/events/json_connection_status_change_event.nim b/library/events/json_connection_status_change_event.nim new file mode 100644 index 000000000..347a84c48 --- /dev/null +++ b/library/events/json_connection_status_change_event.nim @@ -0,0 +1,19 @@ +{.push raises: [].} + +import system, std/json +import ./json_base_event +import ../../waku/api/types + +type JsonConnectionStatusChangeEvent* = ref object of JsonEvent + status*: ConnectionStatus + +proc new*( + T: type JsonConnectionStatusChangeEvent, status: ConnectionStatus +): T = + return JsonConnectionStatusChangeEvent( + eventType: "node_health_change", + status: status + ) + +method `$`*(event: JsonConnectionStatusChangeEvent): string = + $(%*event) diff --git a/library/events/json_waku_not_responding_event.nim b/library/events/json_waku_not_responding_event.nim deleted file mode 100644 index 1e1d5fcc5..000000000 --- a/library/events/json_waku_not_responding_event.nim +++ /dev/null @@ -1,9 +0,0 @@ -import system, std/json, ./json_base_event - -type JsonWakuNotRespondingEvent* = ref object of JsonEvent - -proc new*(T: type JsonWakuNotRespondingEvent): T = - return JsonWakuNotRespondingEvent(eventType: "waku_not_responding") - -method `$`*(event: JsonWakuNotRespondingEvent): string = - $(%*event) diff --git a/library/ffi_types.nim b/library/ffi_types.nim deleted file mode 100644 index a5eeb9711..000000000 --- a/library/ffi_types.nim +++ /dev/null @@ -1,30 +0,0 @@ 
-################################################################################ -### Exported types - -type WakuCallBack* = proc( - callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer -) {.cdecl, gcsafe, raises: [].} - -const RET_OK*: cint = 0 -const RET_ERR*: cint = 1 -const RET_MISSING_CALLBACK*: cint = 2 - -### End of exported types -################################################################################ - -################################################################################ -### FFI utils - -template foreignThreadGc*(body: untyped) = - when declared(setupForeignThreadGc): - setupForeignThreadGc() - - body - - when declared(tearDownForeignThreadGc): - tearDownForeignThreadGc() - -type onDone* = proc() - -### End of FFI utils -################################################################################ diff --git a/library/ios_bearssl_stubs.c b/library/ios_bearssl_stubs.c new file mode 100644 index 000000000..a028cdf25 --- /dev/null +++ b/library/ios_bearssl_stubs.c @@ -0,0 +1,32 @@ +/** + * iOS stubs for BearSSL tools functions not normally included in the library. + * These are typically from the BearSSL tools/ directory which is for CLI tools. 
+ */ + +#include + +/* x509_noanchor context - simplified stub */ +typedef struct { + void *vtable; + void *inner; +} x509_noanchor_context; + +/* Stub for x509_noanchor_init - used to skip anchor validation */ +void x509_noanchor_init(x509_noanchor_context *xwc, const void **inner) { + if (xwc && inner) { + xwc->inner = (void*)*inner; + xwc->vtable = NULL; + } +} + +/* TAs (Trust Anchors) - empty array stub */ +/* This is typically defined by applications with their CA certificates */ +typedef struct { + void *dn; + size_t dn_len; + unsigned flags; + void *pkey; +} br_x509_trust_anchor; + +const br_x509_trust_anchor TAs[1] = {{0}}; +const size_t TAs_NUM = 0; diff --git a/library/ios_natpmp_stubs.c b/library/ios_natpmp_stubs.c new file mode 100644 index 000000000..ef635db10 --- /dev/null +++ b/library/ios_natpmp_stubs.c @@ -0,0 +1,14 @@ +/** + * iOS stub for getgateway.c functions. + * iOS doesn't have net/route.h, so we provide a stub that returns failure. + * NAT-PMP functionality won't work but the library will link. 
+ */ + +#include +#include + +/* getdefaultgateway - returns -1 (failure) on iOS */ +int getdefaultgateway(in_addr_t *addr) { + (void)addr; /* unused */ + return -1; /* failure - not supported on iOS */ +} diff --git a/library/kernel_api/debug_node_api.nim b/library/kernel_api/debug_node_api.nim new file mode 100644 index 000000000..9d5a7f134 --- /dev/null +++ b/library/kernel_api/debug_node_api.nim @@ -0,0 +1,50 @@ +import std/json +import + chronicles, + chronos, + results, + eth/p2p/discoveryv5/enr, + strutils, + libp2p/peerid, + metrics, + ffi +import + waku/factory/waku, waku/node/waku_node, waku/node/health_monitor, library/declare_lib + +proc getMultiaddresses(node: WakuNode): seq[string] = + return node.info().listenAddresses + +proc getMetrics(): string = + {.gcsafe.}: + return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module + +proc waku_version( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + return ok(WakuNodeVersionString) + +proc waku_listen_addresses( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## returns a comma-separated string of the listen addresses + return ok(ctx.myLib[].node.getMultiaddresses().join(",")) + +proc waku_get_my_enr( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + return ok(ctx.myLib[].node.enr.toURI()) + +proc waku_get_my_peerid( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + return ok($ctx.myLib[].node.peerId()) + +proc waku_get_metrics( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + return ok(getMetrics()) + +proc waku_is_online( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + return ok($ctx.myLib[].healthMonitor.onlineMonitor.amIOnline()) diff --git a/library/kernel_api/discovery_api.nim b/library/kernel_api/discovery_api.nim new file mode 100644 index 000000000..f61b7bad1 --- 
/dev/null +++ b/library/kernel_api/discovery_api.nim @@ -0,0 +1,96 @@ +import std/json +import chronos, chronicles, results, strutils, libp2p/multiaddress, ffi +import + waku/factory/waku, + waku/discovery/waku_dnsdisc, + waku/discovery/waku_discv5, + waku/waku_core/peers, + waku/node/waku_node, + waku/node/kernel_api, + library/declare_lib + +proc retrieveBootstrapNodes( + enrTreeUrl: string, ipDnsServer: string +): Future[Result[seq[string], string]] {.async.} = + let dnsNameServers = @[parseIpAddress(ipDnsServer)] + let discoveredPeers: seq[RemotePeerInfo] = ( + await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers) + ).valueOr: + return err("failed discovering peers from DNS: " & $error) + + var multiAddresses = newSeq[string]() + + for discPeer in discoveredPeers: + for address in discPeer.addrs: + multiAddresses.add($address & "/p2p/" & $discPeer) + + return ok(multiAddresses) + +proc updateDiscv5BootstrapNodes(nodes: string, waku: Waku): Result[void, string] = + waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr: + return err("error in updateDiscv5BootstrapNodes: " & $error) + return ok() + +proc performPeerExchangeRequestTo*( + numPeers: uint64, waku: Waku +): Future[Result[int, string]] {.async.} = + let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr: + return err($error) + return ok(numPeersRecv) + +proc waku_discv5_update_bootnodes( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + bootnodes: cstring, +) {.ffi.} = + ## Updates the bootnode list used for discovering new peers via DiscoveryV5 + ## bootnodes - JSON array containing the bootnode ENRs i.e. 
`["enr:...", "enr:..."]` + + updateDiscv5BootstrapNodes($bootnodes, ctx.myLib[]).isOkOr: + error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error + return err($error) + + return ok("discovery request processed correctly") + +proc waku_dns_discovery( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + enrTreeUrl: cstring, + nameDnsServer: cstring, + timeoutMs: cint, +) {.ffi.} = + let nodes = (await retrieveBootstrapNodes($enrTreeUrl, $nameDnsServer)).valueOr: + error "GET_BOOTSTRAP_NODES failed", error = error + return err($error) + + ## returns a comma-separated string of bootstrap nodes' multiaddresses + return ok(nodes.join(",")) + +proc waku_start_discv5( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + (await ctx.myLib[].wakuDiscv5.start()).isOkOr: + error "START_DISCV5 failed", error = error + return err("error starting discv5: " & $error) + + return ok("discv5 started correctly") + +proc waku_stop_discv5( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + await ctx.myLib[].wakuDiscv5.stop() + return ok("discv5 stopped correctly") + +proc waku_peer_exchange_request( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + numPeers: uint64, +) {.ffi.} = + let numValidPeers = (await performPeerExchangeRequestTo(numPeers, ctx.myLib[])).valueOr: + error "waku_peer_exchange_request failed", error = error + return err("failed peer exchange: " & $error) + + return ok($numValidPeers) diff --git a/library/waku_thread_requests/requests/node_lifecycle_request.nim b/library/kernel_api/node_lifecycle_api.nim similarity index 60% rename from library/waku_thread_requests/requests/node_lifecycle_request.nim rename to library/kernel_api/node_lifecycle_api.nim index aa71ac6bb..8f3e99b24 100644 --- a/library/waku_thread_requests/requests/node_lifecycle_request.nim +++ b/library/kernel_api/node_lifecycle_api.nim @@ -1,43 +1,14 @@ import std/[options, json, 
strutils, net] -import chronos, chronicles, results, confutils, confutils/std/net +import chronos, chronicles, results, confutils, confutils/std/net, ffi import waku/node/peer_manager/peer_manager, tools/confutils/cli_args, waku/factory/waku, waku/factory/node_factory, - waku/factory/networks_config, waku/factory/app_callbacks, - waku/rest_api/endpoint/builder - -import - ../../alloc - -type NodeLifecycleMsgType* = enum - CREATE_NODE - START_NODE - STOP_NODE - -type NodeLifecycleRequest* = object - operation: NodeLifecycleMsgType - configJson: cstring ## Only used in 'CREATE_NODE' operation - appCallbacks: AppCallbacks - -proc createShared*( - T: type NodeLifecycleRequest, - op: NodeLifecycleMsgType, - configJson: cstring = "", - appCallbacks: AppCallbacks = nil, -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].appCallbacks = appCallbacks - ret[].configJson = configJson.alloc() - return ret - -proc destroyShared(self: ptr NodeLifecycleRequest) = - deallocShared(self[].configJson) - deallocShared(self) + waku/rest_api/endpoint/builder, + library/declare_lib proc createWaku( configJson: cstring, appCallbacks: AppCallbacks = nil @@ -87,26 +58,28 @@ proc createWaku( return ok(wakuRes) -proc process*( - self: ptr NodeLifecycleRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - case self.operation - of CREATE_NODE: - waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr: - error "CREATE_NODE failed", error = error +registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]): + proc( + configJson: cstring, appCallbacks: AppCallbacks + ): Future[Result[string, string]] {.async.} = + ctx.myLib[] = (await createWaku(configJson, cast[AppCallbacks](appCallbacks))).valueOr: + error "CreateNodeRequest failed", error = error return err($error) - of START_NODE: - (await waku.startWaku()).isOkOr: - error "START_NODE failed", error = error - return err($error) - of STOP_NODE: - try: - 
await waku[].stop() - except Exception: - error "STOP_NODE failed", error = getCurrentExceptionMsg() - return err(getCurrentExceptionMsg()) + return ok("") + +proc waku_start( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + (await startWaku(ctx[].myLib)).isOkOr: + error "START_NODE failed", error = error + return err("failed to start: " & $error) + return ok("") + +proc waku_stop( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + (await ctx.myLib[].stop()).isOkOr: + error "STOP_NODE failed", error = error + return err("failed to stop: " & $error) return ok("") diff --git a/library/kernel_api/peer_manager_api.nim b/library/kernel_api/peer_manager_api.nim new file mode 100644 index 000000000..f0ae37f00 --- /dev/null +++ b/library/kernel_api/peer_manager_api.nim @@ -0,0 +1,123 @@ +import std/[sequtils, strutils, tables] +import chronicles, chronos, results, options, json, ffi +import waku/factory/waku, waku/node/waku_node, waku/node/peer_manager, ../declare_lib + +type PeerInfo = object + protocols: seq[string] + addresses: seq[string] + +proc waku_get_peerids_from_peerstore( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## returns a comma-separated string of peerIDs + let peerIDs = + ctx.myLib[].node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",") + return ok(peerIDs) + +proc waku_connect( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + peerMultiAddr: cstring, + timeoutMs: cuint, +) {.ffi.} = + let peers = ($peerMultiAddr).split(",").mapIt(strip(it)) + await ctx.myLib[].node.connectToNodes(peers, source = "static") + return ok("") + +proc waku_disconnect_peer_by_id( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer, peerId: cstring +) {.ffi.} = + let pId = PeerId.init($peerId).valueOr: + error "DISCONNECT_PEER_BY_ID failed", error = $error + return err($error) + await 
ctx.myLib[].node.peerManager.disconnectNode(pId) + return ok("") + +proc waku_disconnect_all_peers( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + await ctx.myLib[].node.peerManager.disconnectAllPeers() + return ok("") + +proc waku_dial_peer( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + peerMultiAddr: cstring, + protocol: cstring, + timeoutMs: cuint, +) {.ffi.} = + let remotePeerInfo = parsePeerInfo($peerMultiAddr).valueOr: + error "DIAL_PEER failed", error = $error + return err($error) + let conn = await ctx.myLib[].node.peerManager.dialPeer(remotePeerInfo, $protocol) + if conn.isNone(): + let msg = "failed dialing peer" + error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId + return err(msg) + return ok("") + +proc waku_dial_peer_by_id( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + peerId: cstring, + protocol: cstring, + timeoutMs: cuint, +) {.ffi.} = + let pId = PeerId.init($peerId).valueOr: + error "DIAL_PEER_BY_ID failed", error = $error + return err($error) + let conn = await ctx.myLib[].node.peerManager.dialPeer(pId, $protocol) + if conn.isNone(): + let msg = "failed dialing peer" + error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId + return err(msg) + + return ok("") + +proc waku_get_connected_peers_info( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## returns a JSON string mapping peerIDs to objects with protocols and addresses + + var peersMap = initTable[string, PeerInfo]() + let peers = ctx.myLib[].node.peerManager.switch.peerStore.peers().filterIt( + it.connectedness == Connected + ) + + # Build a map of peer IDs to peer info objects + for peer in peers: + let peerIdStr = $peer.peerId + peersMap[peerIdStr] = + PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it)) + + # Convert the map to JSON string + let jsonObj = %*peersMap + let jsonStr = $jsonObj + return 
ok(jsonStr) + +proc waku_get_connected_peers( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + ## returns a comma-separated string of peerIDs + let + (inPeerIds, outPeerIds) = ctx.myLib[].node.peerManager.connectedPeers() + connectedPeerids = concat(inPeerIds, outPeerIds) + + return ok(connectedPeerids.mapIt($it).join(",")) + +proc waku_get_peerids_by_protocol( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + protocol: cstring, +) {.ffi.} = + ## returns a comma-separated string of peerIDs that mount the given protocol + let connectedPeers = ctx.myLib[].node.peerManager.switch.peerStore + .peers($protocol) + .filterIt(it.connectedness == Connected) + .mapIt($it.peerId) + .join(",") + return ok(connectedPeers) diff --git a/library/kernel_api/ping_api.nim b/library/kernel_api/ping_api.nim new file mode 100644 index 000000000..4f10dcf59 --- /dev/null +++ b/library/kernel_api/ping_api.nim @@ -0,0 +1,43 @@ +import std/[json, strutils] +import chronos, results, ffi +import libp2p/[protocols/ping, switch, multiaddress, multicodec] +import waku/[factory/waku, waku_core/peers, node/waku_node], library/declare_lib + +proc waku_ping_peer( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + peerAddr: cstring, + timeoutMs: cuint, +) {.ffi.} = + let peerInfo = peers.parsePeerInfo(($peerAddr).split(",")).valueOr: + return err("PingRequest failed to parse peer addr: " & $error) + + let timeout = chronos.milliseconds(timeoutMs) + proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} = + try: + let conn = + await ctx.myLib[].node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) + defer: + await conn.close() + + let pingRTT = await ctx.myLib[].node.libp2pPing.ping(conn) + if pingRTT == 0.nanos: + return err("could not ping peer: rtt-0") + return ok(pingRTT) + except CatchableError as exc: + return err("could not ping peer: " & exc.msg) + + let pingFuture = ping() + let pingRTT: 
Duration = + if timeout == chronos.milliseconds(0): # No timeout expected + (await pingFuture).valueOr: + return err("ping failed, no timeout expected: " & error) + else: + let timedOut = not (await pingFuture.withTimeout(timeout)) + if timedOut: + return err("ping timed out") + pingFuture.read().valueOr: + return err("failed to read ping future: " & error) + + return ok($(pingRTT.nanos)) diff --git a/library/kernel_api/protocols/filter_api.nim b/library/kernel_api/protocols/filter_api.nim new file mode 100644 index 000000000..c4f99510a --- /dev/null +++ b/library/kernel_api/protocols/filter_api.nim @@ -0,0 +1,109 @@ +import options, std/[strutils, sequtils] +import chronicles, chronos, results, ffi +import + waku/waku_filter_v2/client, + waku/waku_core/message/message, + waku/factory/waku, + waku/waku_relay, + waku/waku_filter_v2/common, + waku/waku_core/subscription/push_handler, + waku/node/peer_manager/peer_manager, + waku/node/waku_node, + waku/node/kernel_api, + waku/waku_core/topics/pubsub_topic, + waku/waku_core/topics/content_topic, + library/events/json_message_event, + library/declare_lib + +const FilterOpTimeout = 5.seconds + +proc checkFilterClientMounted(waku: Waku): Result[string, string] = + if waku.node.wakuFilterClient.isNil(): + let errorMsg = "wakuFilterClient is not mounted" + error "fail filter process", error = errorMsg + return err(errorMsg) + return ok("") + +proc waku_filter_subscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, + contentTopics: cstring, +) {.ffi.} = + proc onReceivedMessage(ctx: ptr FFIContext): WakuRelayHandler = + return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = + callEventCallback(ctx, "onReceivedMessage"): + $JsonMessageEvent.new(pubsubTopic, msg) + + checkFilterClientMounted(ctx.myLib[]).isOkOr: + return err($error) + + var filterPushEventCallback = FilterPushHandler(onReceivedMessage(ctx)) + 
ctx.myLib[].node.wakuFilterClient.registerPushHandler(filterPushEventCallback) + + let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = "could not find peer with WakuFilterSubscribeCodec when subscribing" + error "fail filter subscribe", error = errorMsg + return err(errorMsg) + + let subFut = ctx.myLib[].node.filterSubscribe( + some(PubsubTopic($pubsubTopic)), + ($contentTopics).split(",").mapIt(ContentTopic(it)), + peer, + ) + if not await subFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter subscription timed out" + error "fail filter unsubscribe", error = errorMsg + + return err(errorMsg) + + return ok("") + +proc waku_filter_unsubscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, + contentTopics: cstring, +) {.ffi.} = + checkFilterClientMounted(ctx.myLib[]).isOkOr: + return err($error) + + let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = + "could not find peer with WakuFilterSubscribeCodec when unsubscribing" + error "fail filter process", error = errorMsg + return err(errorMsg) + + let subFut = ctx.myLib[].node.filterUnsubscribe( + some(PubsubTopic($pubsubTopic)), + ($contentTopics).split(",").mapIt(ContentTopic(it)), + peer, + ) + if not await subFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter un-subscription timed out" + error "fail filter unsubscribe", error = errorMsg + return err(errorMsg) + return ok("") + +proc waku_filter_unsubscribe_all( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + checkFilterClientMounted(ctx.myLib[]).isOkOr: + return err($error) + + let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = + "could not find peer with WakuFilterSubscribeCodec when unsubscribing all" + error "fail filter unsubscribe all", error = errorMsg + return err(errorMsg) + + let unsubFut = 
ctx.myLib[].node.filterUnsubscribeAll(peer) + + if not await unsubFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter un-subscription all timed out" + error "fail filter unsubscribe all", error = errorMsg + + return err(errorMsg) + return ok("") diff --git a/library/kernel_api/protocols/lightpush_api.nim b/library/kernel_api/protocols/lightpush_api.nim new file mode 100644 index 000000000..e9251a3f3 --- /dev/null +++ b/library/kernel_api/protocols/lightpush_api.nim @@ -0,0 +1,51 @@ +import options, std/[json, strformat] +import chronicles, chronos, results, ffi +import + waku/waku_core/message/message, + waku/waku_core/codecs, + waku/factory/waku, + waku/waku_core/message, + waku/waku_core/topics/pubsub_topic, + waku/waku_lightpush_legacy/client, + waku/node/peer_manager/peer_manager, + library/events/json_message_event, + library/declare_lib + +proc waku_lightpush_publish( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, + jsonWakuMessage: cstring, +) {.ffi.} = + if ctx.myLib[].node.wakuLightpushClient.isNil(): + let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil" + error "PUBLISH failed", error = errorMsg + return err(errorMsg) + + var jsonMessage: JsonMessage + try: + let jsonContent = parseJson($jsonWakuMessage) + jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: + raise newException(JsonParsingError, $error) + except JsonParsingError as exc: + return err(fmt"Error parsing json message: {exc.msg}") + + let msg = json_message_event.toWakuMessage(jsonMessage).valueOr: + return err("Problem building the WakuMessage: " & $error) + + let peerOpt = ctx.myLib[].node.peerManager.selectPeer(WakuLightPushCodec) + if peerOpt.isNone(): + let errorMsg = "failed to lightpublish message, no suitable remote peers" + error "PUBLISH failed", error = errorMsg + return err(errorMsg) + + let msgHashHex = ( + await ctx.myLib[].node.wakuLegacyLightpushClient.publish( + $pubsubTopic, msg, peer = 
peerOpt.get() + ) + ).valueOr: + error "PUBLISH failed", error = error + return err($error) + + return ok(msgHashHex) diff --git a/library/kernel_api/protocols/relay_api.nim b/library/kernel_api/protocols/relay_api.nim new file mode 100644 index 000000000..b184d6011 --- /dev/null +++ b/library/kernel_api/protocols/relay_api.nim @@ -0,0 +1,171 @@ +import std/[net, sequtils, strutils, json], strformat +import chronicles, chronos, stew/byteutils, results, ffi +import + waku/waku_core/message/message, + waku/factory/[validator_signed, waku], + tools/confutils/cli_args, + waku/waku_core/message, + waku/waku_core/topics/pubsub_topic, + waku/waku_core/topics, + waku/node/kernel_api/relay, + waku/waku_relay/protocol, + waku/node/peer_manager, + library/events/json_message_event, + library/declare_lib + +proc waku_relay_get_peers_in_mesh( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + let meshPeers = ctx.myLib[].node.wakuRelay.getPeersInMesh($pubsubTopic).valueOr: + error "LIST_MESH_PEERS failed", error = error + return err($error) + ## returns a comma-separated string of peerIDs + return ok(meshPeers.mapIt($it).join(",")) + +proc waku_relay_get_num_peers_in_mesh( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + let numPeersInMesh = ctx.myLib[].node.wakuRelay.getNumPeersInMesh($pubsubTopic).valueOr: + error "NUM_MESH_PEERS failed", error = error + return err($error) + return ok($numPeersInMesh) + +proc waku_relay_get_connected_peers( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + ## Returns the list of all connected peers to an specific pubsub topic + let connPeers = ctx.myLib[].node.wakuRelay.getConnectedPeers($pubsubTopic).valueOr: + error "LIST_CONNECTED_PEERS failed", error = error + return err($error) + ## returns a comma-separated string of peerIDs + return 
ok(connPeers.mapIt($it).join(",")) + +proc waku_relay_get_num_connected_peers( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + let numConnPeers = ctx.myLib[].node.wakuRelay.getNumConnectedPeers($pubsubTopic).valueOr: + error "NUM_CONNECTED_PEERS failed", error = error + return err($error) + return ok($numConnPeers) + +proc waku_relay_add_protected_shard( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + clusterId: cint, + shardId: cint, + publicKey: cstring, +) {.ffi.} = + ## Protects a shard with a public key + try: + let relayShard = RelayShard(clusterId: uint16(clusterId), shardId: uint16(shardId)) + let protectedShard = ProtectedShard.parseCmdArg($relayShard & ":" & $publicKey) + ctx.myLib[].node.wakuRelay.addSignedShardsValidator( + @[protectedShard], uint16(clusterId) + ) + except ValueError as exc: + return err("ERROR in waku_relay_add_protected_shard: " & exc.msg) + + return ok("") + +proc waku_relay_subscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + echo "Subscribing to topic: " & $pubSubTopic & " ..." 
+ proc onReceivedMessage(ctx: ptr FFIContext[Waku]): WakuRelayHandler = + return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = + callEventCallback(ctx, "onReceivedMessage"): + $JsonMessageEvent.new(pubsubTopic, msg) + + var cb = onReceivedMessage(ctx) + + ctx.myLib[].node.subscribe( + (kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic), + handler = WakuRelayHandler(cb), + ).isOkOr: + error "SUBSCRIBE failed", error = error + return err($error) + return ok("") + +proc waku_relay_unsubscribe( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, +) {.ffi.} = + ctx.myLib[].node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic)).isOkOr: + error "UNSUBSCRIBE failed", error = error + return err($error) + + return ok("") + +proc waku_relay_publish( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + pubSubTopic: cstring, + jsonWakuMessage: cstring, + timeoutMs: cuint, +) {.ffi.} = + var + # https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms + jsonMessage: JsonMessage + try: + let jsonContent = parseJson($jsonWakuMessage) + jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: + raise newException(JsonParsingError, $error) + except JsonParsingError as exc: + return err(fmt"Error parsing json message: {exc.msg}") + + let msg = json_message_event.toWakuMessage(jsonMessage).valueOr: + return err("Problem building the WakuMessage: " & $error) + + (await ctx.myLib[].node.wakuRelay.publish($pubsubTopic, msg)).isOkOr: + error "PUBLISH failed", error = error + return err($error) + + let msgHash = computeMessageHash($pubSubTopic, msg).to0xHex + return ok(msgHash) + +proc waku_default_pubsub_topic( + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +) {.ffi.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic + return ok(DefaultPubsubTopic) + +proc waku_content_topic( + ctx: 
ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + appName: cstring, + appVersion: cuint, + contentTopicName: cstring, + encoding: cstring, +) {.ffi.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding + + return ok(fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}") + +proc waku_pubsub_topic( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, + topicName: cstring, +) {.ffi.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding + return ok(fmt"/waku/2/{$topicName}") diff --git a/library/waku_thread_requests/requests/protocols/store_request.nim b/library/kernel_api/protocols/store_api.nim similarity index 57% rename from library/waku_thread_requests/requests/protocols/store_request.nim rename to library/kernel_api/protocols/store_api.nim index 3fe1e2f13..0df4d9b1f 100644 --- a/library/waku_thread_requests/requests/protocols/store_request.nim +++ b/library/kernel_api/protocols/store_api.nim @@ -1,28 +1,16 @@ import std/[json, sugar, strutils, options] -import chronos, chronicles, results, stew/byteutils +import chronos, chronicles, results, stew/byteutils, ffi import - ../../../../waku/factory/waku, - ../../../alloc, - ../../../utils, - ../../../../waku/waku_core/peers, - ../../../../waku/waku_core/time, - ../../../../waku/waku_core/message/digest, - ../../../../waku/waku_store/common, - ../../../../waku/waku_store/client, - ../../../../waku/common/paging + waku/factory/waku, + library/utils, + waku/waku_core/peers, + waku/waku_core/message/digest, + waku/waku_store/common, + waku/waku_store/client, + waku/common/paging, + library/declare_lib -type StoreReqType* = enum - REMOTE_QUERY ## to perform a query to another Store node - -type StoreRequest* = object - operation: StoreReqType - jsonQuery: cstring - peerAddr: cstring - timeoutMs: cint - -func fromJsonNode( - T: type 
StoreRequest, jsonContent: JsonNode -): Result[StoreQueryRequest, string] = +func fromJsonNode(jsonContent: JsonNode): Result[StoreQueryRequest, string] = var contentTopics: seq[string] if jsonContent.contains("contentTopics"): contentTopics = collect(newSeq): @@ -78,54 +66,29 @@ func fromJsonNode( ) ) -proc createShared*( - T: type StoreRequest, - op: StoreReqType, +proc waku_store_query( + ctx: ptr FFIContext[Waku], + callback: FFICallBack, + userData: pointer, jsonQuery: cstring, peerAddr: cstring, timeoutMs: cint, -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].timeoutMs = timeoutMs - ret[].jsonQuery = jsonQuery.alloc() - ret[].peerAddr = peerAddr.alloc() - return ret - -proc destroyShared(self: ptr StoreRequest) = - deallocShared(self[].jsonQuery) - deallocShared(self[].peerAddr) - deallocShared(self) - -proc process_remote_query( - self: ptr StoreRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = +) {.ffi.} = let jsonContentRes = catch: - parseJson($self[].jsonQuery) + parseJson($jsonQuery) if jsonContentRes.isErr(): return err("StoreRequest failed parsing store request: " & jsonContentRes.error.msg) - let storeQueryRequest = ?StoreRequest.fromJsonNode(jsonContentRes.get()) + let storeQueryRequest = ?fromJsonNode(jsonContentRes.get()) - let peer = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr: + let peer = peers.parsePeerInfo(($peerAddr).split(",")).valueOr: return err("StoreRequest failed to parse peer addr: " & $error) - let queryResponse = (await waku.node.wakuStoreClient.query(storeQueryRequest, peer)).valueOr: + let queryResponse = ( + await ctx.myLib[].node.wakuStoreClient.query(storeQueryRequest, peer) + ).valueOr: return err("StoreRequest failed store query: " & $error) let res = $(%*(queryResponse.toHex())) return ok(res) ## returning the response in json format - -proc process*( - self: ptr StoreRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - 
deallocShared(self) - - case self.operation - of REMOTE_QUERY: - return await self.process_remote_query(waku) - - error "store request not handled at all" - return err("store request not handled at all") diff --git a/library/libwaku.h b/library/libwaku.h index b5d6c9bab..67c89c7c2 100644 --- a/library/libwaku.h +++ b/library/libwaku.h @@ -10,241 +10,242 @@ #include // The possible returned values for the functions that return int -#define RET_OK 0 -#define RET_ERR 1 -#define RET_MISSING_CALLBACK 2 +#define RET_OK 0 +#define RET_ERR 1 +#define RET_MISSING_CALLBACK 2 #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif -typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData); + typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData); -// Creates a new instance of the waku node. -// Sets up the waku node from the given configuration. -// Returns a pointer to the Context needed by the rest of the API functions. -void* waku_new( - const char* configJson, - WakuCallBack callback, - void* userData); + // Creates a new instance of the waku node. + // Sets up the waku node from the given configuration. + // Returns a pointer to the Context needed by the rest of the API functions. 
+ void *waku_new( + const char *configJson, + FFICallBack callback, + void *userData); -int waku_start(void* ctx, - WakuCallBack callback, - void* userData); + int waku_start(void *ctx, + FFICallBack callback, + void *userData); -int waku_stop(void* ctx, - WakuCallBack callback, - void* userData); + int waku_stop(void *ctx, + FFICallBack callback, + void *userData); -// Destroys an instance of a waku node created with waku_new -int waku_destroy(void* ctx, - WakuCallBack callback, - void* userData); + // Destroys an instance of a waku node created with waku_new + int waku_destroy(void *ctx, + FFICallBack callback, + void *userData); -int waku_version(void* ctx, - WakuCallBack callback, - void* userData); + int waku_version(void *ctx, + FFICallBack callback, + void *userData); -// Sets a callback that will be invoked whenever an event occurs. -// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. -void waku_set_event_callback(void* ctx, - WakuCallBack callback, - void* userData); + // Sets a callback that will be invoked whenever an event occurs. + // It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. 
+ void set_event_callback(void *ctx, + FFICallBack callback, + void *userData); -int waku_content_topic(void* ctx, - const char* appName, - unsigned int appVersion, - const char* contentTopicName, - const char* encoding, - WakuCallBack callback, - void* userData); + int waku_content_topic(void *ctx, + FFICallBack callback, + void *userData, + const char *appName, + unsigned int appVersion, + const char *contentTopicName, + const char *encoding); -int waku_pubsub_topic(void* ctx, - const char* topicName, - WakuCallBack callback, - void* userData); + int waku_pubsub_topic(void *ctx, + FFICallBack callback, + void *userData, + const char *topicName); -int waku_default_pubsub_topic(void* ctx, - WakuCallBack callback, - void* userData); + int waku_default_pubsub_topic(void *ctx, + FFICallBack callback, + void *userData); -int waku_relay_publish(void* ctx, - const char* pubSubTopic, - const char* jsonWakuMessage, - unsigned int timeoutMs, - WakuCallBack callback, - void* userData); + int waku_relay_publish(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic, + const char *jsonWakuMessage, + unsigned int timeoutMs); -int waku_lightpush_publish(void* ctx, - const char* pubSubTopic, - const char* jsonWakuMessage, - WakuCallBack callback, - void* userData); + int waku_lightpush_publish(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic, + const char *jsonWakuMessage); -int waku_relay_subscribe(void* ctx, - const char* pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_subscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_relay_add_protected_shard(void* ctx, - int clusterId, - int shardId, - char* publicKey, - WakuCallBack callback, - void* userData); + int waku_relay_add_protected_shard(void *ctx, + FFICallBack callback, + void *userData, + int clusterId, + int shardId, + char *publicKey); -int waku_relay_unsubscribe(void* ctx, - const char* 
pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_unsubscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_filter_subscribe(void* ctx, - const char* pubSubTopic, - const char* contentTopics, - WakuCallBack callback, - void* userData); + int waku_filter_subscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic, + const char *contentTopics); -int waku_filter_unsubscribe(void* ctx, - const char* pubSubTopic, - const char* contentTopics, - WakuCallBack callback, - void* userData); + int waku_filter_unsubscribe(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic, + const char *contentTopics); -int waku_filter_unsubscribe_all(void* ctx, - WakuCallBack callback, - void* userData); + int waku_filter_unsubscribe_all(void *ctx, + FFICallBack callback, + void *userData); -int waku_relay_get_num_connected_peers(void* ctx, - const char* pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_get_num_connected_peers(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_relay_get_connected_peers(void* ctx, - const char* pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_get_connected_peers(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_relay_get_num_peers_in_mesh(void* ctx, - const char* pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_get_num_peers_in_mesh(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_relay_get_peers_in_mesh(void* ctx, - const char* pubSubTopic, - WakuCallBack callback, - void* userData); + int waku_relay_get_peers_in_mesh(void *ctx, + FFICallBack callback, + void *userData, + const char *pubSubTopic); -int waku_store_query(void* ctx, - const char* jsonQuery, - const char* peerAddr, - int timeoutMs, - WakuCallBack callback, - void* userData); + int 
waku_store_query(void *ctx, + FFICallBack callback, + void *userData, + const char *jsonQuery, + const char *peerAddr, + int timeoutMs); -int waku_connect(void* ctx, - const char* peerMultiAddr, - unsigned int timeoutMs, - WakuCallBack callback, - void* userData); + int waku_connect(void *ctx, + FFICallBack callback, + void *userData, + const char *peerMultiAddr, + unsigned int timeoutMs); -int waku_disconnect_peer_by_id(void* ctx, - const char* peerId, - WakuCallBack callback, - void* userData); + int waku_disconnect_peer_by_id(void *ctx, + FFICallBack callback, + void *userData, + const char *peerId); -int waku_disconnect_all_peers(void* ctx, - WakuCallBack callback, - void* userData); + int waku_disconnect_all_peers(void *ctx, + FFICallBack callback, + void *userData); -int waku_dial_peer(void* ctx, - const char* peerMultiAddr, - const char* protocol, - int timeoutMs, - WakuCallBack callback, - void* userData); + int waku_dial_peer(void *ctx, + FFICallBack callback, + void *userData, + const char *peerMultiAddr, + const char *protocol, + int timeoutMs); -int waku_dial_peer_by_id(void* ctx, - const char* peerId, - const char* protocol, - int timeoutMs, - WakuCallBack callback, - void* userData); + int waku_dial_peer_by_id(void *ctx, + FFICallBack callback, + void *userData, + const char *peerId, + const char *protocol, + int timeoutMs); -int waku_get_peerids_from_peerstore(void* ctx, - WakuCallBack callback, - void* userData); + int waku_get_peerids_from_peerstore(void *ctx, + FFICallBack callback, + void *userData); -int waku_get_connected_peers_info(void* ctx, - WakuCallBack callback, - void* userData); + int waku_get_connected_peers_info(void *ctx, + FFICallBack callback, + void *userData); -int waku_get_peerids_by_protocol(void* ctx, - const char* protocol, - WakuCallBack callback, - void* userData); + int waku_get_peerids_by_protocol(void *ctx, + FFICallBack callback, + void *userData, + const char *protocol); -int waku_listen_addresses(void* ctx, - 
WakuCallBack callback, - void* userData); + int waku_listen_addresses(void *ctx, + FFICallBack callback, + void *userData); -int waku_get_connected_peers(void* ctx, - WakuCallBack callback, - void* userData); + int waku_get_connected_peers(void *ctx, + FFICallBack callback, + void *userData); -// Returns a list of multiaddress given a url to a DNS discoverable ENR tree -// Parameters -// char* entTreeUrl: URL containing a discoverable ENR tree -// char* nameDnsServer: The nameserver to resolve the ENR tree url. -// int timeoutMs: Timeout value in milliseconds to execute the call. -int waku_dns_discovery(void* ctx, - const char* entTreeUrl, - const char* nameDnsServer, - int timeoutMs, - WakuCallBack callback, - void* userData); + // Returns a list of multiaddress given a url to a DNS discoverable ENR tree + // Parameters + // char* entTreeUrl: URL containing a discoverable ENR tree + // char* nameDnsServer: The nameserver to resolve the ENR tree url. + // int timeoutMs: Timeout value in milliseconds to execute the call. + int waku_dns_discovery(void *ctx, + FFICallBack callback, + void *userData, + const char *entTreeUrl, + const char *nameDnsServer, + int timeoutMs); -// Updates the bootnode list used for discovering new peers via DiscoveryV5 -// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]` -int waku_discv5_update_bootnodes(void* ctx, - char* bootnodes, - WakuCallBack callback, - void* userData); + // Updates the bootnode list used for discovering new peers via DiscoveryV5 + // bootnodes - JSON array containing the bootnode ENRs i.e. 
`["enr:...", "enr:..."]` + int waku_discv5_update_bootnodes(void *ctx, + FFICallBack callback, + void *userData, + char *bootnodes); -int waku_start_discv5(void* ctx, - WakuCallBack callback, - void* userData); + int waku_start_discv5(void *ctx, + FFICallBack callback, + void *userData); -int waku_stop_discv5(void* ctx, - WakuCallBack callback, - void* userData); + int waku_stop_discv5(void *ctx, + FFICallBack callback, + void *userData); -// Retrieves the ENR information -int waku_get_my_enr(void* ctx, - WakuCallBack callback, - void* userData); + // Retrieves the ENR information + int waku_get_my_enr(void *ctx, + FFICallBack callback, + void *userData); -int waku_get_my_peerid(void* ctx, - WakuCallBack callback, - void* userData); + int waku_get_my_peerid(void *ctx, + FFICallBack callback, + void *userData); -int waku_get_metrics(void* ctx, - WakuCallBack callback, - void* userData); + int waku_get_metrics(void *ctx, + FFICallBack callback, + void *userData); -int waku_peer_exchange_request(void* ctx, - int numPeers, - WakuCallBack callback, - void* userData); + int waku_peer_exchange_request(void *ctx, + FFICallBack callback, + void *userData, + int numPeers); -int waku_ping_peer(void* ctx, - const char* peerAddr, - int timeoutMs, - WakuCallBack callback, - void* userData); + int waku_ping_peer(void *ctx, + FFICallBack callback, + void *userData, + const char *peerAddr, + int timeoutMs); -int waku_is_online(void* ctx, - WakuCallBack callback, - void* userData); + int waku_is_online(void *ctx, + FFICallBack callback, + void *userData); #ifdef __cplusplus } diff --git a/library/libwaku.nim b/library/libwaku.nim index ad3afa134..eb3cdff5e 100644 --- a/library/libwaku.nim +++ b/library/libwaku.nim @@ -1,107 +1,37 @@ -{.pragma: exported, exportc, cdecl, raises: [].} -{.pragma: callback, cdecl, raises: [], gcsafe.} -{.passc: "-fPIC".} - -when defined(linux): - {.passl: "-Wl,-soname,libwaku.so".} - -import std/[json, atomics, strformat, options, atomics] -import 
chronicles, chronos, chronos/threadsync +import std/[atomics, options, atomics, macros] +import chronicles, chronos, chronos/threadsync, ffi import - waku/common/base64, waku/waku_core/message/message, - waku/node/waku_node, - waku/node/peer_manager, waku/waku_core/topics/pubsub_topic, - waku/waku_core/subscription/push_handler, waku/waku_relay, ./events/json_message_event, - ./waku_context, - ./waku_thread_requests/requests/node_lifecycle_request, - ./waku_thread_requests/requests/peer_manager_request, - ./waku_thread_requests/requests/protocols/relay_request, - ./waku_thread_requests/requests/protocols/store_request, - ./waku_thread_requests/requests/protocols/lightpush_request, - ./waku_thread_requests/requests/protocols/filter_request, - ./waku_thread_requests/requests/debug_node_request, - ./waku_thread_requests/requests/discovery_request, - ./waku_thread_requests/requests/ping_request, - ./waku_thread_requests/waku_thread_request, - ./alloc, - ./ffi_types, - ../waku/factory/app_callbacks + ./events/json_topic_health_change_event, + ./events/json_connection_change_event, + ./events/json_connection_status_change_event, + ../waku/factory/app_callbacks, + waku/factory/waku, + waku/node/waku_node, + waku/node/health_monitor/health_status, + ./declare_lib ################################################################################ -### Wrapper around the waku node -################################################################################ - -################################################################################ -### Not-exported components - -template checkLibwakuParams*( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -) = - if not isNil(ctx): - ctx[].userData = userData - - if isNil(callback): - return RET_MISSING_CALLBACK - -proc handleRequest( - ctx: ptr WakuContext, - requestType: RequestType, - content: pointer, - callback: WakuCallBack, - userData: pointer, -): cint = - waku_context.sendRequestToWakuThread(ctx, 
requestType, content, callback, userData).isOkOr: - let msg = "libwaku error: " & $error - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) - return RET_ERR - - return RET_OK - -### End of not-exported components -################################################################################ - -################################################################################ -### Library setup - -# Every Nim library must have this function called - the name is derived from -# the `--nimMainPrefix` command line option -proc libwakuNimMain() {.importc.} - -# To control when the library has been initialized -var initialized: Atomic[bool] - -if defined(android): - # Redirect chronicles to Android System logs - when compiles(defaultChroniclesStream.outputs[0].writer): - defaultChroniclesStream.outputs[0].writer = proc( - logLevel: LogLevel, msg: LogOutputStr - ) {.raises: [].} = - echo logLevel, msg - -proc initializeLibrary() {.exported.} = - if not initialized.exchange(true): - ## Every Nim library needs to call `NimMain` once exactly, to initialize the Nim runtime. - ## Being `` the value given in the optional compilation flag --nimMainPrefix:yourprefix - libwakuNimMain() - when declared(setupForeignThreadGc): - setupForeignThreadGc() - when declared(nimGC_setStackBottom): - var locals {.volatile, noinit.}: pointer - locals = addr(locals) - nimGC_setStackBottom(locals) - -### End of library setup -################################################################################ +## Include different APIs, i.e. 
all procs with {.ffi.} pragma +include + ./kernel_api/peer_manager_api, + ./kernel_api/discovery_api, + ./kernel_api/node_lifecycle_api, + ./kernel_api/debug_node_api, + ./kernel_api/ping_api, + ./kernel_api/protocols/relay_api, + ./kernel_api/protocols/store_api, + ./kernel_api/protocols/lightpush_api, + ./kernel_api/protocols/filter_api ################################################################################ ### Exported procs proc waku_new( - configJson: cstring, callback: WakuCallback, userData: pointer + configJson: cstring, callback: FFICallback, userData: pointer ): pointer {.dynlib, exportc, cdecl.} = initializeLibrary() @@ -111,41 +41,56 @@ proc waku_new( return nil ## Create the Waku thread that will keep waiting for req from the main thread. - var ctx = waku_context.createWakuContext().valueOr: - let msg = "Error in createWakuContext: " & $error + var ctx = ffi.createFFIContext[Waku]().valueOr: + let msg = "Error in createFFIContext: " & $error callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return nil ctx.userData = userData + proc onReceivedMessage(ctx: ptr FFIContext): WakuRelayHandler = + return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = + callEventCallback(ctx, "onReceivedMessage"): + $JsonMessageEvent.new(pubsubTopic, msg) + + proc onTopicHealthChange(ctx: ptr FFIContext): TopicHealthChangeHandler = + return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} = + callEventCallback(ctx, "onTopicHealthChange"): + $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth) + + proc onConnectionChange(ctx: ptr FFIContext): ConnectionChangeHandler = + return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} = + callEventCallback(ctx, "onConnectionChange"): + $JsonConnectionChangeEvent.new($peerId, peerEvent) + + proc onConnectionStatusChange(ctx: ptr FFIContext): ConnectionStatusChangeHandler = + return proc(status: ConnectionStatus) {.async.} = + callEventCallback(ctx, 
"onConnectionStatusChange"): + $JsonConnectionStatusChangeEvent.new(status) + let appCallbacks = AppCallbacks( relayHandler: onReceivedMessage(ctx), topicHealthChangeHandler: onTopicHealthChange(ctx), connectionChangeHandler: onConnectionChange(ctx), + connectionStatusChangeHandler: onConnectionStatusChange(ctx) ) - let retCode = handleRequest( - ctx, - RequestType.LIFECYCLE, - NodeLifecycleRequest.createShared( - NodeLifecycleMsgType.CREATE_NODE, configJson, appCallbacks - ), - callback, - userData, - ) - - if retCode == RET_ERR: + ffi.sendRequestToFFIThread( + ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson, appCallbacks) + ).isOkOr: + let msg = "error in sendRequestToFFIThread: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return nil return ctx proc waku_destroy( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = + ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer +): cint {.dynlib, exportc, cdecl.} = initializeLibrary() - checkLibwakuParams(ctx, callback, userData) + checkParams(ctx, callback, userData) - waku_context.destroyWakuContext(ctx).isOkOr: + ffi.destroyFFIContext(ctx).isOkOr: let msg = "libwaku error: " & $error callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) return RET_ERR @@ -155,699 +100,5 @@ proc waku_destroy( return RET_OK -proc waku_version( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - callback( - RET_OK, - cast[ptr cchar](WakuNodeVersionString), - cast[csize_t](len(WakuNodeVersionString)), - userData, - ) - - return RET_OK - -proc waku_set_event_callback( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -) {.dynlib, exportc.} = - initializeLibrary() - ctx[].eventCallback = cast[pointer](callback) - ctx[].eventUserData = userData - -proc waku_content_topic( - ctx: ptr 
WakuContext, - appName: cstring, - appVersion: cuint, - contentTopicName: cstring, - encoding: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - # https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding - - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}" - callback( - RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData - ) - - return RET_OK - -proc waku_pubsub_topic( - ctx: ptr WakuContext, topicName: cstring, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc, cdecl.} = - # https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding - - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - let outPubsubTopic = fmt"/waku/2/{$topicName}" - callback( - RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData - ) - - return RET_OK - -proc waku_default_pubsub_topic( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - # https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic - - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - callback( - RET_OK, - cast[ptr cchar](DefaultPubsubTopic), - cast[csize_t](len(DefaultPubsubTopic)), - userData, - ) - - return RET_OK - -proc waku_relay_publish( - ctx: ptr WakuContext, - pubSubTopic: cstring, - jsonWakuMessage: cstring, - timeoutMs: cuint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc, cdecl.} = - # https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms - - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - var jsonMessage: JsonMessage - try: - let jsonContent = parseJson($jsonWakuMessage) - jsonMessage = 
JsonMessage.fromJsonNode(jsonContent).valueOr: - raise newException(JsonParsingError, $error) - except JsonParsingError: - let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}" - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) - return RET_ERR - - let wakuMessage = jsonMessage.toWakuMessage().valueOr: - let msg = "Problem building the WakuMessage: " & $error - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) - return RET_ERR - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage), - callback, - userData, - ) - -proc waku_start( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - handleRequest( - ctx, - RequestType.LIFECYCLE, - NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE), - callback, - userData, - ) - -proc waku_stop( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - handleRequest( - ctx, - RequestType.LIFECYCLE, - NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE), - callback, - userData, - ) - -proc waku_relay_subscribe( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - var cb = onReceivedMessage(ctx) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)), - callback, - userData, - ) - -proc waku_relay_add_protected_shard( - ctx: ptr WakuContext, - clusterId: cint, - shardId: cint, - publicKey: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc, cdecl.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, 
userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared( - RelayMsgType.ADD_PROTECTED_SHARD, - clusterId = clusterId, - shardId = shardId, - publicKey = publicKey, - ), - callback, - userData, - ) - -proc waku_relay_unsubscribe( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared( - RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx)) - ), - callback, - userData, - ) - -proc waku_relay_get_num_connected_peers( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic), - callback, - userData, - ) - -proc waku_relay_get_connected_peers( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic), - callback, - userData, - ) - -proc waku_relay_get_num_peers_in_mesh( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic), - callback, - userData, - ) - -proc waku_relay_get_peers_in_mesh( - ctx: ptr WakuContext, - pubSubTopic: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - 
checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.RELAY, - RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic), - callback, - userData, - ) - -proc waku_filter_subscribe( - ctx: ptr WakuContext, - pubSubTopic: cstring, - contentTopics: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.FILTER, - FilterRequest.createShared( - FilterMsgType.SUBSCRIBE, - pubSubTopic, - contentTopics, - FilterPushHandler(onReceivedMessage(ctx)), - ), - callback, - userData, - ) - -proc waku_filter_unsubscribe( - ctx: ptr WakuContext, - pubSubTopic: cstring, - contentTopics: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.FILTER, - FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE, pubSubTopic, contentTopics), - callback, - userData, - ) - -proc waku_filter_unsubscribe_all( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.FILTER, - FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE_ALL), - callback, - userData, - ) - -proc waku_lightpush_publish( - ctx: ptr WakuContext, - pubSubTopic: cstring, - jsonWakuMessage: cstring, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc, cdecl.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - var jsonMessage: JsonMessage - try: - let jsonContent = parseJson($jsonWakuMessage) - jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: - raise newException(JsonParsingError, $error) - except JsonParsingError: - let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}" - callback(RET_ERR, unsafeAddr 
msg[0], cast[csize_t](len(msg)), userData) - return RET_ERR - - let wakuMessage = jsonMessage.toWakuMessage().valueOr: - let msg = "Problem building the WakuMessage: " & $error - callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) - return RET_ERR - - handleRequest( - ctx, - RequestType.LIGHTPUSH, - LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage), - callback, - userData, - ) - -proc waku_connect( - ctx: ptr WakuContext, - peerMultiAddr: cstring, - timeoutMs: cuint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared( - PeerManagementMsgType.CONNECT_TO, $peerMultiAddr, chronos.milliseconds(timeoutMs) - ), - callback, - userData, - ) - -proc waku_disconnect_peer_by_id( - ctx: ptr WakuContext, peerId: cstring, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared( - op = PeerManagementMsgType.DISCONNECT_PEER_BY_ID, peerId = $peerId - ), - callback, - userData, - ) - -proc waku_disconnect_all_peers( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS), - callback, - userData, - ) - -proc waku_dial_peer( - ctx: ptr WakuContext, - peerMultiAddr: cstring, - protocol: cstring, - timeoutMs: cuint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - 
PeerManagementRequest.createShared( - op = PeerManagementMsgType.DIAL_PEER, - peerMultiAddr = $peerMultiAddr, - protocol = $protocol, - ), - callback, - userData, - ) - -proc waku_dial_peer_by_id( - ctx: ptr WakuContext, - peerId: cstring, - protocol: cstring, - timeoutMs: cuint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared( - op = PeerManagementMsgType.DIAL_PEER_BY_ID, peerId = $peerId, protocol = $protocol - ), - callback, - userData, - ) - -proc waku_get_peerids_from_peerstore( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared(PeerManagementMsgType.GET_ALL_PEER_IDS), - callback, - userData, - ) - -proc waku_get_connected_peers_info( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO), - callback, - userData, - ) - -proc waku_get_connected_peers( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PEER_MANAGER, - PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS), - callback, - userData, - ) - -proc waku_get_peerids_by_protocol( - ctx: ptr WakuContext, protocol: cstring, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - 
RequestType.PEER_MANAGER, - PeerManagementRequest.createShared( - op = PeerManagementMsgType.GET_PEER_IDS_BY_PROTOCOL, protocol = $protocol - ), - callback, - userData, - ) - -proc waku_store_query( - ctx: ptr WakuContext, - jsonQuery: cstring, - peerAddr: cstring, - timeoutMs: cint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.STORE, - StoreRequest.createShared(StoreReqType.REMOTE_QUERY, jsonQuery, peerAddr, timeoutMs), - callback, - userData, - ) - -proc waku_listen_addresses( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES), - callback, - userData, - ) - -proc waku_dns_discovery( - ctx: ptr WakuContext, - entTreeUrl: cstring, - nameDnsServer: cstring, - timeoutMs: cint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DISCOVERY, - DiscoveryRequest.createRetrieveBootstrapNodesRequest( - DiscoveryMsgType.GET_BOOTSTRAP_NODES, entTreeUrl, nameDnsServer, timeoutMs - ), - callback, - userData, - ) - -proc waku_discv5_update_bootnodes( - ctx: ptr WakuContext, bootnodes: cstring, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - ## Updates the bootnode list used for discovering new peers via DiscoveryV5 - ## bootnodes - JSON array containing the bootnode ENRs i.e. 
`["enr:...", "enr:..."]` - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DISCOVERY, - DiscoveryRequest.createUpdateBootstrapNodesRequest( - DiscoveryMsgType.UPDATE_DISCV5_BOOTSTRAP_NODES, bootnodes - ), - callback, - userData, - ) - -proc waku_get_my_enr( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_ENR), - callback, - userData, - ) - -proc waku_get_my_peerid( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_PEER_ID), - callback, - userData, - ) - -proc waku_get_metrics( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS), - callback, - userData, - ) - -proc waku_start_discv5( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DISCOVERY, - DiscoveryRequest.createDiscV5StartRequest(), - callback, - userData, - ) - -proc waku_stop_discv5( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DISCOVERY, - DiscoveryRequest.createDiscV5StopRequest(), - callback, - userData, - ) - -proc waku_peer_exchange_request( - ctx: ptr WakuContext, numPeers: 
uint64, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DISCOVERY, - DiscoveryRequest.createPeerExchangeRequest(numPeers), - callback, - userData, - ) - -proc waku_ping_peer( - ctx: ptr WakuContext, - peerAddr: cstring, - timeoutMs: cuint, - callback: WakuCallBack, - userData: pointer, -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.PING, - PingRequest.createShared(peerAddr, chronos.milliseconds(timeoutMs)), - callback, - userData, - ) - -proc waku_is_online( - ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer -): cint {.dynlib, exportc.} = - initializeLibrary() - checkLibwakuParams(ctx, callback, userData) - - handleRequest( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE), - callback, - userData, - ) - -### End of exported procs -################################################################################ +# ### End of exported procs +# ################################################################################ diff --git a/library/waku_context.nim b/library/waku_context.nim deleted file mode 100644 index ab4b996af..000000000 --- a/library/waku_context.nim +++ /dev/null @@ -1,223 +0,0 @@ -{.pragma: exported, exportc, cdecl, raises: [].} -{.pragma: callback, cdecl, raises: [], gcsafe.} -{.passc: "-fPIC".} - -import std/[options, atomics, os, net, locks] -import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results -import - waku/common/logging, - waku/factory/waku, - waku/node/peer_manager, - waku/waku_relay/[protocol, topic_health], - waku/waku_core/[topics/pubsub_topic, message], - ./waku_thread_requests/[waku_thread_request, requests/debug_node_request], - ./ffi_types, - ./events/[ - json_message_event, json_topic_health_change_event, 
json_connection_change_event, - json_waku_not_responding_event, - ] - -type WakuContext* = object - wakuThread: Thread[(ptr WakuContext)] - watchdogThread: Thread[(ptr WakuContext)] - # monitors the Waku thread and notifies the Waku SDK consumer if it hangs - lock: Lock - reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest] - reqSignal: ThreadSignalPtr - # to inform The Waku Thread (a.k.a TWT) that a new request is sent - reqReceivedSignal: ThreadSignalPtr - # to inform the main thread that the request is rx by TWT - userData*: pointer - eventCallback*: pointer - eventUserdata*: pointer - running: Atomic[bool] # To control when the threads are running - -const git_version* {.strdefine.} = "n/a" -const versionString = "version / git commit hash: " & waku.git_version - -template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) = - if isNil(ctx[].eventCallback): - error eventName & " - eventCallback is nil" - return - - foreignThreadGc: - try: - let event = body - cast[WakuCallBack](ctx[].eventCallback)( - RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData - ) - except Exception, CatchableError: - let msg = - "Exception " & eventName & " when calling 'eventCallBack': " & - getCurrentExceptionMsg() - cast[WakuCallBack](ctx[].eventCallback)( - RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData - ) - -proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler = - return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} = - callEventCallback(ctx, "onConnectionChange"): - $JsonConnectionChangeEvent.new($peerId, peerEvent) - -proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler = - return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = - callEventCallback(ctx, "onReceivedMessage"): - $JsonMessageEvent.new(pubsubTopic, msg) - -proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler = - return proc(pubsubTopic: PubsubTopic, topicHealth: 
TopicHealth) {.async.} = - callEventCallback(ctx, "onTopicHealthChange"): - $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth) - -proc onWakuNotResponding*(ctx: ptr WakuContext) = - callEventCallback(ctx, "onWakuNotResponsive"): - $JsonWakuNotRespondingEvent.new() - -proc sendRequestToWakuThread*( - ctx: ptr WakuContext, - reqType: RequestType, - reqContent: pointer, - callback: WakuCallBack, - userData: pointer, - timeout = InfiniteDuration, -): Result[void, string] = - ctx.lock.acquire() - # This lock is only necessary while we use a SP Channel and while the signalling - # between threads assumes that there aren't concurrent requests. - # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive - # requests concurrently and spare us the need of locks - defer: - ctx.lock.release() - - let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData) - ## Sending the request - let sentOk = ctx.reqChannel.trySend(req) - if not sentOk: - deallocShared(req) - return err("Couldn't send a request to the waku thread: " & $req[]) - - let fireSync = ctx.reqSignal.fireSync().valueOr: - deallocShared(req) - return err("failed fireSync: " & $error) - - if not fireSync: - deallocShared(req) - return err("Couldn't fireSync in time") - - ## wait until the Waku Thread properly received the request - ctx.reqReceivedSignal.waitSync(timeout).isOkOr: - deallocShared(req) - return err("Couldn't receive reqReceivedSignal signal") - - ## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the - ## process proc. See the 'waku_thread_request.nim' module for more details. - ok() - -proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} = - ## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs. 
- - let watchdogRun = proc(ctx: ptr WakuContext) {.async.} = - const WatchdogStartDelay = 10.seconds - const WatchdogTimeinterval = 1.seconds - const WakuNotRespondingTimeout = 3.seconds - - # Give time for the node to be created and up before sending watchdog requests - await sleepAsync(WatchdogStartDelay) - while true: - await sleepAsync(WatchdogTimeinterval) - - if ctx.running.load == false: - info "Watchdog thread exiting because WakuContext is not running" - break - - let wakuCallback = proc( - callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer - ) {.cdecl, gcsafe, raises: [].} = - discard ## Don't do anything. Just respecting the callback signature. - const nilUserData = nil - - trace "Sending watchdog request to Waku thread" - - sendRequestToWakuThread( - ctx, - RequestType.DEBUG, - DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED), - wakuCallback, - nilUserData, - WakuNotRespondingTimeout, - ).isOkOr: - error "Failed to send watchdog request to Waku thread", error = $error - onWakuNotResponding(ctx) - - waitFor watchdogRun(ctx) - -proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} = - ## Waku thread that attends library user requests (stop, connect_to, etc.) 
- - logging.setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT) - - let wakuRun = proc(ctx: ptr WakuContext) {.async.} = - var waku: Waku - while true: - await ctx.reqSignal.wait() - - if ctx.running.load == false: - break - - ## Trying to get a request from the libwaku requestor thread - var request: ptr WakuThreadRequest - let recvOk = ctx.reqChannel.tryRecv(request) - if not recvOk: - error "waku thread could not receive a request" - continue - - ## Handle the request - asyncSpawn WakuThreadRequest.process(request, addr waku) - - ctx.reqReceivedSignal.fireSync().isOkOr: - error "could not fireSync back to requester thread", error = error - - waitFor wakuRun(ctx) - -proc createWakuContext*(): Result[ptr WakuContext, string] = - ## This proc is called from the main thread and it creates - ## the Waku working thread. - var ctx = createShared(WakuContext, 1) - ctx.reqSignal = ThreadSignalPtr.new().valueOr: - return err("couldn't create reqSignal ThreadSignalPtr") - ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: - return err("couldn't create reqReceivedSignal ThreadSignalPtr") - ctx.lock.initLock() - - ctx.running.store(true) - - try: - createThread(ctx.wakuThread, wakuThreadBody, ctx) - except ValueError, ResourceExhaustedError: - freeShared(ctx) - return err("failed to create the Waku thread: " & getCurrentExceptionMsg()) - - try: - createThread(ctx.watchdogThread, watchdogThreadBody, ctx) - except ValueError, ResourceExhaustedError: - freeShared(ctx) - return err("failed to create the watchdog thread: " & getCurrentExceptionMsg()) - - return ok(ctx) - -proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] = - ctx.running.store(false) - - let signaledOnTime = ctx.reqSignal.fireSync().valueOr: - return err("error in destroyWakuContext: " & $error) - if not signaledOnTime: - return err("failed to signal reqSignal on time in destroyWakuContext") - - joinThread(ctx.wakuThread) - joinThread(ctx.watchdogThread) - ctx.lock.deinitLock() - 
?ctx.reqSignal.close() - ?ctx.reqReceivedSignal.close() - freeShared(ctx) - - return ok() diff --git a/library/waku_thread_requests/requests/debug_node_request.nim b/library/waku_thread_requests/requests/debug_node_request.nim deleted file mode 100644 index c9aa5a743..000000000 --- a/library/waku_thread_requests/requests/debug_node_request.nim +++ /dev/null @@ -1,63 +0,0 @@ -import std/json -import - chronicles, - chronos, - results, - eth/p2p/discoveryv5/enr, - strutils, - libp2p/peerid, - metrics -import - ../../../waku/factory/waku, - ../../../waku/node/waku_node, - ../../../waku/node/health_monitor - -type DebugNodeMsgType* = enum - RETRIEVE_LISTENING_ADDRESSES - RETRIEVE_MY_ENR - RETRIEVE_MY_PEER_ID - RETRIEVE_METRICS - RETRIEVE_ONLINE_STATE - CHECK_WAKU_NOT_BLOCKED - -type DebugNodeRequest* = object - operation: DebugNodeMsgType - -proc createShared*(T: type DebugNodeRequest, op: DebugNodeMsgType): ptr type T = - var ret = createShared(T) - ret[].operation = op - return ret - -proc destroyShared(self: ptr DebugNodeRequest) = - deallocShared(self) - -proc getMultiaddresses(node: WakuNode): seq[string] = - return node.info().listenAddresses - -proc getMetrics(): string = - {.gcsafe.}: - return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module - -proc process*( - self: ptr DebugNodeRequest, waku: Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - case self.operation - of RETRIEVE_LISTENING_ADDRESSES: - ## returns a comma-separated string of the listen addresses - return ok(waku.node.getMultiaddresses().join(",")) - of RETRIEVE_MY_ENR: - return ok(waku.node.enr.toURI()) - of RETRIEVE_MY_PEER_ID: - return ok($waku.node.peerId()) - of RETRIEVE_METRICS: - return ok(getMetrics()) - of RETRIEVE_ONLINE_STATE: - return ok($waku.healthMonitor.onlineMonitor.amIOnline()) - of CHECK_WAKU_NOT_BLOCKED: - return ok("waku thread is not blocked") - - error "unsupported operation in DebugNodeRequest" - return 
err("unsupported operation in DebugNodeRequest") diff --git a/library/waku_thread_requests/requests/discovery_request.nim b/library/waku_thread_requests/requests/discovery_request.nim deleted file mode 100644 index 405483a46..000000000 --- a/library/waku_thread_requests/requests/discovery_request.nim +++ /dev/null @@ -1,151 +0,0 @@ -import std/json -import chronos, chronicles, results, strutils, libp2p/multiaddress -import - ../../../waku/factory/waku, - ../../../waku/discovery/waku_dnsdisc, - ../../../waku/discovery/waku_discv5, - ../../../waku/waku_core/peers, - ../../../waku/node/waku_node, - ../../../waku/node/kernel_api, - ../../alloc - -type DiscoveryMsgType* = enum - GET_BOOTSTRAP_NODES - UPDATE_DISCV5_BOOTSTRAP_NODES - START_DISCV5 - STOP_DISCV5 - PEER_EXCHANGE - -type DiscoveryRequest* = object - operation: DiscoveryMsgType - - ## used in GET_BOOTSTRAP_NODES - enrTreeUrl: cstring - nameDnsServer: cstring - timeoutMs: cint - - ## used in UPDATE_DISCV5_BOOTSTRAP_NODES - nodes: cstring - - ## used in PEER_EXCHANGE - numPeers: uint64 - -proc createShared( - T: type DiscoveryRequest, - op: DiscoveryMsgType, - enrTreeUrl: cstring, - nameDnsServer: cstring, - timeoutMs: cint, - nodes: cstring, - numPeers: uint64, -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].enrTreeUrl = enrTreeUrl.alloc() - ret[].nameDnsServer = nameDnsServer.alloc() - ret[].timeoutMs = timeoutMs - ret[].nodes = nodes.alloc() - ret[].numPeers = numPeers - return ret - -proc createRetrieveBootstrapNodesRequest*( - T: type DiscoveryRequest, - op: DiscoveryMsgType, - enrTreeUrl: cstring, - nameDnsServer: cstring, - timeoutMs: cint, -): ptr type T = - return T.createShared(op, enrTreeUrl, nameDnsServer, timeoutMs, "", 0) - -proc createUpdateBootstrapNodesRequest*( - T: type DiscoveryRequest, op: DiscoveryMsgType, nodes: cstring -): ptr type T = - return T.createShared(op, "", "", 0, nodes, 0) - -proc createDiscV5StartRequest*(T: type DiscoveryRequest): ptr type T = - 
return T.createShared(START_DISCV5, "", "", 0, "", 0) - -proc createDiscV5StopRequest*(T: type DiscoveryRequest): ptr type T = - return T.createShared(STOP_DISCV5, "", "", 0, "", 0) - -proc createPeerExchangeRequest*( - T: type DiscoveryRequest, numPeers: uint64 -): ptr type T = - return T.createShared(PEER_EXCHANGE, "", "", 0, "", numPeers) - -proc destroyShared(self: ptr DiscoveryRequest) = - deallocShared(self[].enrTreeUrl) - deallocShared(self[].nameDnsServer) - deallocShared(self[].nodes) - deallocShared(self) - -proc retrieveBootstrapNodes( - enrTreeUrl: string, ipDnsServer: string -): Future[Result[seq[string], string]] {.async.} = - let dnsNameServers = @[parseIpAddress(ipDnsServer)] - let discoveredPeers: seq[RemotePeerInfo] = ( - await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers) - ).valueOr: - return err("failed discovering peers from DNS: " & $error) - - var multiAddresses = newSeq[string]() - - for discPeer in discoveredPeers: - for address in discPeer.addrs: - multiAddresses.add($address & "/p2p/" & $discPeer) - - return ok(multiAddresses) - -proc updateDiscv5BootstrapNodes(nodes: string, waku: ptr Waku): Result[void, string] = - waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr: - return err("error in updateDiscv5BootstrapNodes: " & $error) - return ok() - -proc performPeerExchangeRequestTo( - numPeers: uint64, waku: ptr Waku -): Future[Result[int, string]] {.async.} = - let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr: - return err($error) - return ok(numPeersRecv) - -proc process*( - self: ptr DiscoveryRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - case self.operation - of START_DISCV5: - let res = await waku.wakuDiscv5.start() - res.isOkOr: - error "START_DISCV5 failed", error = error - return err($error) - - return ok("discv5 started correctly") - of STOP_DISCV5: - await waku.wakuDiscv5.stop() - - return ok("discv5 stopped correctly") - of 
GET_BOOTSTRAP_NODES: - let nodes = ( - await retrieveBootstrapNodes($self[].enrTreeUrl, $self[].nameDnsServer) - ).valueOr: - error "GET_BOOTSTRAP_NODES failed", error = error - return err($error) - - ## returns a comma-separated string of bootstrap nodes' multiaddresses - return ok(nodes.join(",")) - of UPDATE_DISCV5_BOOTSTRAP_NODES: - updateDiscv5BootstrapNodes($self[].nodes, waku).isOkOr: - error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error - return err($error) - - return ok("discovery request processed correctly") - of PEER_EXCHANGE: - let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr: - error "PEER_EXCHANGE failed", error = error - return err($error) - return ok($numValidPeers) - - error "discovery request not handled" - return err("discovery request not handled") diff --git a/library/waku_thread_requests/requests/peer_manager_request.nim b/library/waku_thread_requests/requests/peer_manager_request.nim deleted file mode 100644 index cac5ca30e..000000000 --- a/library/waku_thread_requests/requests/peer_manager_request.nim +++ /dev/null @@ -1,135 +0,0 @@ -import std/[sequtils, strutils, tables] -import chronicles, chronos, results, options, json -import - ../../../waku/factory/waku, - ../../../waku/node/waku_node, - ../../alloc, - ../../../waku/node/peer_manager - -type PeerManagementMsgType* {.pure.} = enum - CONNECT_TO - GET_ALL_PEER_IDS - GET_CONNECTED_PEERS_INFO - GET_PEER_IDS_BY_PROTOCOL - DISCONNECT_PEER_BY_ID - DISCONNECT_ALL_PEERS - DIAL_PEER - DIAL_PEER_BY_ID - GET_CONNECTED_PEERS - -type PeerManagementRequest* = object - operation: PeerManagementMsgType - peerMultiAddr: cstring - dialTimeout: Duration - protocol: cstring - peerId: cstring - -type PeerInfo = object - protocols: seq[string] - addresses: seq[string] - -proc createShared*( - T: type PeerManagementRequest, - op: PeerManagementMsgType, - peerMultiAddr = "", - dialTimeout = chronos.milliseconds(0), ## arbitrary Duration as not all ops needs 
dialTimeout - peerId = "", - protocol = "", -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].peerMultiAddr = peerMultiAddr.alloc() - ret[].peerId = peerId.alloc() - ret[].protocol = protocol.alloc() - ret[].dialTimeout = dialTimeout - return ret - -proc destroyShared(self: ptr PeerManagementRequest) = - if not isNil(self[].peerMultiAddr): - deallocShared(self[].peerMultiAddr) - - if not isNil(self[].peerId): - deallocShared(self[].peerId) - - if not isNil(self[].protocol): - deallocShared(self[].protocol) - - deallocShared(self) - -proc process*( - self: ptr PeerManagementRequest, waku: Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - case self.operation - of CONNECT_TO: - let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it)) - await waku.node.connectToNodes(peers, source = "static") - return ok("") - of GET_ALL_PEER_IDS: - ## returns a comma-separated string of peerIDs - let peerIDs = - waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",") - return ok(peerIDs) - of GET_CONNECTED_PEERS_INFO: - ## returns a JSON string mapping peerIDs to objects with protocols and addresses - - var peersMap = initTable[string, PeerInfo]() - let peers = waku.node.peerManager.switch.peerStore.peers().filterIt( - it.connectedness == Connected - ) - - # Build a map of peer IDs to peer info objects - for peer in peers: - let peerIdStr = $peer.peerId - peersMap[peerIdStr] = - PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it)) - - # Convert the map to JSON string - let jsonObj = %*peersMap - let jsonStr = $jsonObj - return ok(jsonStr) - of GET_PEER_IDS_BY_PROTOCOL: - ## returns a comma-separated string of peerIDs that mount the given protocol - let connectedPeers = waku.node.peerManager.switch.peerStore - .peers($self[].protocol) - .filterIt(it.connectedness == Connected) - .mapIt($it.peerId) - .join(",") - return ok(connectedPeers) - of DISCONNECT_PEER_BY_ID: - let peerId = 
PeerId.init($self[].peerId).valueOr: - error "DISCONNECT_PEER_BY_ID failed", error = $error - return err($error) - await waku.node.peerManager.disconnectNode(peerId) - return ok("") - of DISCONNECT_ALL_PEERS: - await waku.node.peerManager.disconnectAllPeers() - return ok("") - of DIAL_PEER: - let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr: - error "DIAL_PEER failed", error = $error - return err($error) - let conn = await waku.node.peerManager.dialPeer(remotePeerInfo, $self[].protocol) - if conn.isNone(): - let msg = "failed dialing peer" - error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId - return err(msg) - of DIAL_PEER_BY_ID: - let peerId = PeerId.init($self[].peerId).valueOr: - error "DIAL_PEER_BY_ID failed", error = $error - return err($error) - let conn = await waku.node.peerManager.dialPeer(peerId, $self[].protocol) - if conn.isNone(): - let msg = "failed dialing peer" - error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId - return err(msg) - of GET_CONNECTED_PEERS: - ## returns a comma-separated string of peerIDs - let - (inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers() - connectedPeerids = concat(inPeerIds, outPeerIds) - return ok(connectedPeerids.mapIt($it).join(",")) - - return ok("") diff --git a/library/waku_thread_requests/requests/ping_request.nim b/library/waku_thread_requests/requests/ping_request.nim deleted file mode 100644 index 716b9ed68..000000000 --- a/library/waku_thread_requests/requests/ping_request.nim +++ /dev/null @@ -1,54 +0,0 @@ -import std/[json, strutils] -import chronos, results -import libp2p/[protocols/ping, switch, multiaddress, multicodec] -import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc - -type PingRequest* = object - peerAddr: cstring - timeout: Duration - -proc createShared*( - T: type PingRequest, peerAddr: cstring, timeout: Duration -): ptr type T = - var ret = createShared(T) - ret[].peerAddr = peerAddr.alloc() - ret[].timeout 
= timeout - return ret - -proc destroyShared(self: ptr PingRequest) = - deallocShared(self[].peerAddr) - deallocShared(self) - -proc process*( - self: ptr PingRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - let peerInfo = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr: - return err("PingRequest failed to parse peer addr: " & $error) - - proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} = - try: - let conn = await waku.node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) - defer: - await conn.close() - - let pingRTT = await waku.node.libp2pPing.ping(conn) - if pingRTT == 0.nanos: - return err("could not ping peer: rtt-0") - return ok(pingRTT) - except CatchableError: - return err("could not ping peer: " & getCurrentExceptionMsg()) - - let pingFuture = ping() - let pingRTT: Duration = - if self[].timeout == chronos.milliseconds(0): # No timeout expected - ?(await pingFuture) - else: - let timedOut = not (await pingFuture.withTimeout(self[].timeout)) - if timedOut: - return err("ping timed out") - ?(pingFuture.read()) - - ok($(pingRTT.nanos)) diff --git a/library/waku_thread_requests/requests/protocols/filter_request.nim b/library/waku_thread_requests/requests/protocols/filter_request.nim deleted file mode 100644 index cd401d443..000000000 --- a/library/waku_thread_requests/requests/protocols/filter_request.nim +++ /dev/null @@ -1,106 +0,0 @@ -import options, std/[strutils, sequtils] -import chronicles, chronos, results -import - ../../../../waku/waku_filter_v2/client, - ../../../../waku/waku_core/message/message, - ../../../../waku/factory/waku, - ../../../../waku/waku_filter_v2/common, - ../../../../waku/waku_core/subscription/push_handler, - ../../../../waku/node/peer_manager/peer_manager, - ../../../../waku/node/waku_node, - ../../../../waku/node/kernel_api, - ../../../../waku/waku_core/topics/pubsub_topic, - ../../../../waku/waku_core/topics/content_topic, - ../../../alloc 
- -type FilterMsgType* = enum - SUBSCRIBE - UNSUBSCRIBE - UNSUBSCRIBE_ALL - -type FilterRequest* = object - operation: FilterMsgType - pubsubTopic: cstring - contentTopics: cstring ## comma-separated list of content-topics - filterPushEventCallback: FilterPushHandler ## handles incoming filter pushed msgs - -proc createShared*( - T: type FilterRequest, - op: FilterMsgType, - pubsubTopic: cstring = "", - contentTopics: cstring = "", - filterPushEventCallback: FilterPushHandler = nil, -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].pubsubTopic = pubsubTopic.alloc() - ret[].contentTopics = contentTopics.alloc() - ret[].filterPushEventCallback = filterPushEventCallback - - return ret - -proc destroyShared(self: ptr FilterRequest) = - deallocShared(self[].pubsubTopic) - deallocShared(self[].contentTopics) - deallocShared(self) - -proc process*( - self: ptr FilterRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - const FilterOpTimeout = 5.seconds - if waku.node.wakuFilterClient.isNil(): - let errorMsg = "FilterRequest waku.node.wakuFilterClient is nil" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - - case self.operation - of SUBSCRIBE: - waku.node.wakuFilterClient.registerPushHandler(self.filterPushEventCallback) - - let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: - let errorMsg = - "could not find peer with WakuFilterSubscribeCodec when subscribing" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - - let pubsubTopic = some(PubsubTopic($self[].pubsubTopic)) - let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it)) - - let subFut = waku.node.filterSubscribe(pubsubTopic, contentTopics, peer) - if not await subFut.withTimeout(FilterOpTimeout): - let errorMsg = "filter subscription timed out" - error "fail filter process", error = errorMsg, op 
= $(self.operation) - return err(errorMsg) - of UNSUBSCRIBE: - let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: - let errorMsg = - "could not find peer with WakuFilterSubscribeCodec when unsubscribing" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - - let pubsubTopic = some(PubsubTopic($self[].pubsubTopic)) - let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it)) - - let subFut = waku.node.filterUnsubscribe(pubsubTopic, contentTopics, peer) - if not await subFut.withTimeout(FilterOpTimeout): - let errorMsg = "filter un-subscription timed out" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - of UNSUBSCRIBE_ALL: - let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: - let errorMsg = - "could not find peer with WakuFilterSubscribeCodec when unsubscribing all" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - - let unsubFut = waku.node.filterUnsubscribeAll(peer) - - if not await unsubFut.withTimeout(FilterOpTimeout): - let errorMsg = "filter un-subscription all timed out" - error "fail filter process", error = errorMsg, op = $(self.operation) - return err(errorMsg) - - return ok("") diff --git a/library/waku_thread_requests/requests/protocols/lightpush_request.nim b/library/waku_thread_requests/requests/protocols/lightpush_request.nim deleted file mode 100644 index bc3d9de2c..000000000 --- a/library/waku_thread_requests/requests/protocols/lightpush_request.nim +++ /dev/null @@ -1,109 +0,0 @@ -import options -import chronicles, chronos, results -import - ../../../../waku/waku_core/message/message, - ../../../../waku/waku_core/codecs, - ../../../../waku/factory/waku, - ../../../../waku/waku_core/message, - ../../../../waku/waku_core/time, # Timestamp - ../../../../waku/waku_core/topics/pubsub_topic, - ../../../../waku/waku_lightpush_legacy/client, - 
../../../../waku/waku_lightpush_legacy/common, - ../../../../waku/node/peer_manager/peer_manager, - ../../../alloc - -type LightpushMsgType* = enum - PUBLISH - -type ThreadSafeWakuMessage* = object - payload: SharedSeq[byte] - contentTopic: cstring - meta: SharedSeq[byte] - version: uint32 - timestamp: Timestamp - ephemeral: bool - when defined(rln): - proof: SharedSeq[byte] - -type LightpushRequest* = object - operation: LightpushMsgType - pubsubTopic: cstring - message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests - -proc createShared*( - T: type LightpushRequest, - op: LightpushMsgType, - pubsubTopic: cstring, - m = WakuMessage(), -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].pubsubTopic = pubsubTopic.alloc() - ret[].message = ThreadSafeWakuMessage( - payload: allocSharedSeq(m.payload), - contentTopic: m.contentTopic.alloc(), - meta: allocSharedSeq(m.meta), - version: m.version, - timestamp: m.timestamp, - ephemeral: m.ephemeral, - ) - when defined(rln): - ret[].message.proof = allocSharedSeq(m.proof) - - return ret - -proc destroyShared(self: ptr LightpushRequest) = - deallocSharedSeq(self[].message.payload) - deallocShared(self[].message.contentTopic) - deallocSharedSeq(self[].message.meta) - when defined(rln): - deallocSharedSeq(self[].message.proof) - - deallocShared(self) - -proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage = - var wakuMessage = WakuMessage() - - wakuMessage.payload = m.payload.toSeq() - wakuMessage.contentTopic = $m.contentTopic - wakuMessage.meta = m.meta.toSeq() - wakuMessage.version = m.version - wakuMessage.timestamp = m.timestamp - wakuMessage.ephemeral = m.ephemeral - - when defined(rln): - wakuMessage.proof = m.proof - - return wakuMessage - -proc process*( - self: ptr LightpushRequest, waku: ptr Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - case self.operation - of PUBLISH: - let msg = self.message.toWakuMessage() - let pubsubTopic = 
$self.pubsubTopic - - if waku.node.wakuLightpushClient.isNil(): - let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil" - error "PUBLISH failed", error = errorMsg - return err(errorMsg) - - let peerOpt = waku.node.peerManager.selectPeer(WakuLightPushCodec) - if peerOpt.isNone(): - let errorMsg = "failed to lightpublish message, no suitable remote peers" - error "PUBLISH failed", error = errorMsg - return err(errorMsg) - - let msgHashHex = ( - await waku.node.wakuLegacyLightpushClient.publish( - pubsubTopic, msg, peer = peerOpt.get() - ) - ).valueOr: - error "PUBLISH failed", error = error - return err($error) - - return ok(msgHashHex) diff --git a/library/waku_thread_requests/requests/protocols/relay_request.nim b/library/waku_thread_requests/requests/protocols/relay_request.nim deleted file mode 100644 index e110f689e..000000000 --- a/library/waku_thread_requests/requests/protocols/relay_request.nim +++ /dev/null @@ -1,168 +0,0 @@ -import std/[net, sequtils, strutils] -import chronicles, chronos, stew/byteutils, results -import - waku/waku_core/message/message, - waku/factory/[validator_signed, waku], - tools/confutils/cli_args, - waku/waku_node, - waku/waku_core/message, - waku/waku_core/time, # Timestamp - waku/waku_core/topics/pubsub_topic, - waku/waku_core/topics, - waku/waku_relay/protocol, - waku/node/peer_manager - -import - ../../../alloc - -type RelayMsgType* = enum - SUBSCRIBE - UNSUBSCRIBE - PUBLISH - NUM_CONNECTED_PEERS - LIST_CONNECTED_PEERS - ## to return the list of all connected peers to an specific pubsub topic - NUM_MESH_PEERS - LIST_MESH_PEERS - ## to return the list of only the peers that conform the mesh for a particular pubsub topic - ADD_PROTECTED_SHARD ## Protects a shard with a public key - -type ThreadSafeWakuMessage* = object - payload: SharedSeq[byte] - contentTopic: cstring - meta: SharedSeq[byte] - version: uint32 - timestamp: Timestamp - ephemeral: bool - when defined(rln): - proof: SharedSeq[byte] - -type 
RelayRequest* = object - operation: RelayMsgType - pubsubTopic: cstring - relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests - message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests - clusterId: cint # only used in 'ADD_PROTECTED_SHARD' requests - shardId: cint # only used in 'ADD_PROTECTED_SHARD' requests - publicKey: cstring # only used in 'ADD_PROTECTED_SHARD' requests - -proc createShared*( - T: type RelayRequest, - op: RelayMsgType, - pubsubTopic: cstring = nil, - relayEventCallback: WakuRelayHandler = nil, - m = WakuMessage(), - clusterId: cint = 0, - shardId: cint = 0, - publicKey: cstring = nil, -): ptr type T = - var ret = createShared(T) - ret[].operation = op - ret[].pubsubTopic = pubsubTopic.alloc() - ret[].clusterId = clusterId - ret[].shardId = shardId - ret[].publicKey = publicKey.alloc() - ret[].relayEventCallback = relayEventCallback - ret[].message = ThreadSafeWakuMessage( - payload: allocSharedSeq(m.payload), - contentTopic: m.contentTopic.alloc(), - meta: allocSharedSeq(m.meta), - version: m.version, - timestamp: m.timestamp, - ephemeral: m.ephemeral, - ) - when defined(rln): - ret[].message.proof = allocSharedSeq(m.proof) - - return ret - -proc destroyShared(self: ptr RelayRequest) = - deallocSharedSeq(self[].message.payload) - deallocShared(self[].message.contentTopic) - deallocSharedSeq(self[].message.meta) - when defined(rln): - deallocSharedSeq(self[].message.proof) - deallocShared(self[].pubsubTopic) - deallocShared(self[].publicKey) - deallocShared(self) - -proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage = - var wakuMessage = WakuMessage() - - wakuMessage.payload = m.payload.toSeq() - wakuMessage.contentTopic = $m.contentTopic - wakuMessage.meta = m.meta.toSeq() - wakuMessage.version = m.version - wakuMessage.timestamp = m.timestamp - wakuMessage.ephemeral = m.ephemeral - - when defined(rln): - wakuMessage.proof = m.proof - - return wakuMessage - -proc process*( - self: ptr RelayRequest, waku: ptr 
Waku -): Future[Result[string, string]] {.async.} = - defer: - destroyShared(self) - - if waku.node.wakuRelay.isNil(): - return err("Operation not supported without Waku Relay enabled.") - - case self.operation - of SUBSCRIBE: - waku.node.subscribe( - (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic), - handler = self.relayEventCallback, - ).isOkOr: - error "SUBSCRIBE failed", error - return err($error) - of UNSUBSCRIBE: - waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr: - error "UNSUBSCRIBE failed", error - return err($error) - of PUBLISH: - let msg = self.message.toWakuMessage() - let pubsubTopic = $self.pubsubTopic - - (await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr: - error "PUBLISH failed", error - return err($error) - - let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex - return ok(msgHash) - of NUM_CONNECTED_PEERS: - let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr: - error "NUM_CONNECTED_PEERS failed", error - return err($error) - return ok($numConnPeers) - of LIST_CONNECTED_PEERS: - let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr: - error "LIST_CONNECTED_PEERS failed", error = error - return err($error) - ## returns a comma-separated string of peerIDs - return ok(connPeers.mapIt($it).join(",")) - of NUM_MESH_PEERS: - let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr: - error "NUM_MESH_PEERS failed", error = error - return err($error) - return ok($numPeersInMesh) - of LIST_MESH_PEERS: - let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr: - error "LIST_MESH_PEERS failed", error = error - return err($error) - ## returns a comma-separated string of peerIDs - return ok(meshPeers.mapIt($it).join(",")) - of ADD_PROTECTED_SHARD: - try: - let relayShard = - RelayShard(clusterId: uint16(self.clusterId), shardId: uint16(self.shardId)) - let protectedShard = - 
ProtectedShard.parseCmdArg($relayShard & ":" & $self.publicKey) - waku.node.wakuRelay.addSignedShardsValidator( - @[protectedShard], uint16(self.clusterId) - ) - except ValueError: - return err(getCurrentExceptionMsg()) - return ok("") diff --git a/library/waku_thread_requests/waku_thread_request.nim b/library/waku_thread_requests/waku_thread_request.nim deleted file mode 100644 index 50462fba7..000000000 --- a/library/waku_thread_requests/waku_thread_request.nim +++ /dev/null @@ -1,104 +0,0 @@ -## This file contains the base message request type that will be handled. -## The requests are created by the main thread and processed by -## the Waku Thread. - -import std/json, results -import chronos, chronos/threadsync -import - ../../waku/factory/waku, - ../ffi_types, - ./requests/node_lifecycle_request, - ./requests/peer_manager_request, - ./requests/protocols/relay_request, - ./requests/protocols/store_request, - ./requests/protocols/lightpush_request, - ./requests/protocols/filter_request, - ./requests/debug_node_request, - ./requests/discovery_request, - ./requests/ping_request - -type RequestType* {.pure.} = enum - LIFECYCLE - PEER_MANAGER - PING - RELAY - STORE - DEBUG - DISCOVERY - LIGHTPUSH - FILTER - -type WakuThreadRequest* = object - reqType: RequestType - reqContent: pointer - callback: WakuCallBack - userData: pointer - -proc createShared*( - T: type WakuThreadRequest, - reqType: RequestType, - reqContent: pointer, - callback: WakuCallBack, - userData: pointer, -): ptr type T = - var ret = createShared(T) - ret[].reqType = reqType - ret[].reqContent = reqContent - ret[].callback = callback - ret[].userData = userData - return ret - -proc handleRes[T: string | void]( - res: Result[T, string], request: ptr WakuThreadRequest -) = - ## Handles the Result responses, which can either be Result[string, string] or - ## Result[void, string]. 
- - defer: - deallocShared(request) - - if res.isErr(): - foreignThreadGc: - let msg = "libwaku error: handleRes fireSyncRes error: " & $res.error - request[].callback( - RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData - ) - return - - foreignThreadGc: - var msg: cstring = "" - when T is string: - msg = res.get().cstring() - request[].callback( - RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData - ) - return - -proc process*( - T: type WakuThreadRequest, request: ptr WakuThreadRequest, waku: ptr Waku -) {.async.} = - let retFut = - case request[].reqType - of LIFECYCLE: - cast[ptr NodeLifecycleRequest](request[].reqContent).process(waku) - of PEER_MANAGER: - cast[ptr PeerManagementRequest](request[].reqContent).process(waku[]) - of PING: - cast[ptr PingRequest](request[].reqContent).process(waku) - of RELAY: - cast[ptr RelayRequest](request[].reqContent).process(waku) - of STORE: - cast[ptr StoreRequest](request[].reqContent).process(waku) - of DEBUG: - cast[ptr DebugNodeRequest](request[].reqContent).process(waku[]) - of DISCOVERY: - cast[ptr DiscoveryRequest](request[].reqContent).process(waku) - of LIGHTPUSH: - cast[ptr LightpushRequest](request[].reqContent).process(waku) - of FILTER: - cast[ptr FilterRequest](request[].reqContent).process(waku) - - handleRes(await retFut, request) - -proc `$`*(self: WakuThreadRequest): string = - return $self.reqType diff --git a/nix/atlas.nix b/nix/atlas.nix deleted file mode 100644 index 43336e07a..000000000 --- a/nix/atlas.nix +++ /dev/null @@ -1,12 +0,0 @@ -{ pkgs ? import { } }: - -let - tools = pkgs.callPackage ./tools.nix {}; - sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; -in pkgs.fetchFromGitHub { - owner = "nim-lang"; - repo = "atlas"; - rev = tools.findKeyValue "^ +AtlasStableCommit = \"([a-f0-9]+)\"$" sourceFile; - # WARNING: Requires manual updates when Nim compiler version changes. 
- hash = "sha256-G1TZdgbRPSgxXZ3VsBP2+XFCLHXVb3an65MuQx67o/k="; -} \ No newline at end of file diff --git a/nix/checksums.nix b/nix/checksums.nix index d79345d24..c9c9f3d45 100644 --- a/nix/checksums.nix +++ b/nix/checksums.nix @@ -6,7 +6,7 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "checksums"; - rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile; + rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\".*$" sourceFile; # WARNING: Requires manual updates when Nim compiler version changes. - hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ="; + hash = "sha256-JZhWqn4SrAgNw/HLzBK0rrj3WzvJ3Tv1nuDMn83KoYY="; } diff --git a/nix/default.nix b/nix/default.nix index 29eec844d..7df58df60 100644 --- a/nix/default.nix +++ b/nix/default.nix @@ -1,17 +1,15 @@ { - config ? {}, - pkgs ? import { }, + pkgs, src ? ../., targets ? ["libwaku-android-arm64"], - verbosity ? 2, + verbosity ? 1, useSystemNim ? true, quickAndDirty ? true, stableSystems ? [ "x86_64-linux" "aarch64-linux" ], - androidArch, - abidir, - zerokitPkg, + abidir ? 
null, + zerokitRln, }: assert pkgs.lib.assertMsg ((src.submodules or true) == true) @@ -20,91 +18,126 @@ assert pkgs.lib.assertMsg ((src.submodules or true) == true) let inherit (pkgs) stdenv lib writeScriptBin callPackage; - revision = lib.substring 0 8 (src.rev or "dirty"); + androidManifest = ""; -in stdenv.mkDerivation rec { + tools = pkgs.callPackage ./tools.nix {}; + version = tools.findKeyValue "^version = \"([a-f0-9.-]+)\"$" ../waku.nimble; + revision = lib.substring 0 8 (src.rev or src.dirtyRev or "00000000"); + copyLibwaku = lib.elem "libwaku" targets; + copyLiblogosdelivery = lib.elem "liblogosdelivery" targets; + copyWakunode2 = lib.elem "wakunode2" targets; + hasKnownInstallTarget = copyLibwaku || copyLiblogosdelivery || copyWakunode2; - pname = "nwaku"; - - version = "1.0.0-${revision}"; +in stdenv.mkDerivation { + pname = "logos-messaging-nim"; + version = "${version}-${revision}"; inherit src; + # Runtime dependencies buildInputs = with pkgs; [ - openssl - gmp - zip + openssl gmp zip ]; # Dependencies that should only exist in the build environment. nativeBuildInputs = let # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'. fakeGit = writeScriptBin "git" "echo ${version}"; - # Fix for the zerokit package that is built with cargo/rustup/cross. - fakeCargo = writeScriptBin "cargo" "echo ${version}"; - # Fix for the zerokit package that is built with cargo/rustup/cross. - fakeRustup = writeScriptBin "rustup" "echo ${version}"; - # Fix for the zerokit package that is built with cargo/rustup/cross. 
- fakeCross = writeScriptBin "cross" "echo ${version}"; - in - with pkgs; [ - cmake - which - lsb-release - zerokitPkg - nim-unwrapped-2_0 - fakeGit - fakeCargo - fakeRustup - fakeCross + in with pkgs; [ + cmake which zerokitRln nim-unwrapped-2_2 fakeGit + ] ++ lib.optionals stdenv.isDarwin [ + pkgs.darwin.cctools gcc # Necessary for libbacktrace ]; # Environment variables required for Android builds - ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}"; - ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}"; + ANDROID_SDK_ROOT = "${pkgs.androidPkgs.sdk}"; + ANDROID_NDK_HOME = "${pkgs.androidPkgs.ndk}"; NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; XDG_CACHE_HOME = "/tmp"; - androidManifest = ""; makeFlags = targets ++ [ "V=${toString verbosity}" "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}" "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}" "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}" + "LIBRLN_FILE=${zerokitRln}/lib/librln.${if abidir != null then "so" else "a"}" + "POSTGRES=1" ]; configurePhase = '' patchShebangs . vendor/nimbus-build-system > /dev/null + + # build_nim.sh guards "rm -rf dist/checksums" with NIX_BUILD_TOP != "/build", + # but on macOS the nix sandbox uses /private/tmp/... so the check fails and + # dist/checksums (provided via preBuild) gets deleted. Fix the check to skip + # the removal whenever NIX_BUILD_TOP is set (i.e. any nix build). 
+ substituteInPlace vendor/nimbus-build-system/scripts/build_nim.sh \ + --replace 'if [[ "''${NIX_BUILD_TOP}" != "/build" ]]; then' \ + 'if [[ -z "''${NIX_BUILD_TOP}" ]]; then' + make nimbus-build-system-paths make nimbus-build-system-nimble-dir ''; - preBuild = '' - ln -s waku.nimble waku.nims + # For the Nim v2.2.4 built with NBS we added sat and zippy + preBuild = lib.optionalString (!useSystemNim) '' pushd vendor/nimbus-build-system/vendor/Nim mkdir dist - cp -r ${callPackage ./nimble.nix {}} dist/nimble - chmod 777 -R dist/nimble - mkdir -p dist/nimble/dist - cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both - cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums - cp -r ${callPackage ./atlas.nix {}} dist/atlas - chmod 777 -R dist/atlas - mkdir dist/atlas/dist - cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat - cp -r ${callPackage ./sat.nix {}} dist/atlas/dist/sat - cp -r ${callPackage ./csources.nix {}} csources_v2 + mkdir -p dist/nimble/vendor/sat + mkdir -p dist/nimble/vendor/checksums + mkdir -p dist/nimble/vendor/zippy + + cp -r ${callPackage ./nimble.nix {}}/. dist/nimble + cp -r ${callPackage ./checksums.nix {}}/. dist/checksums + cp -r ${callPackage ./csources.nix {}}/. csources_v2 + cp -r ${callPackage ./sat.nix {}}/. dist/nimble/vendor/sat + cp -r ${callPackage ./checksums.nix {}}/. dist/nimble/vendor/checksums + cp -r ${callPackage ./zippy.nix {}}/. 
dist/nimble/vendor/zippy chmod 777 -R dist/nimble csources_v2 popd - mkdir -p vendor/zerokit/target/${androidArch}/release - cp ${zerokitPkg}/librln.so vendor/zerokit/target/${androidArch}/release/ ''; - installPhase = '' + installPhase = if abidir != null then '' mkdir -p $out/jni cp -r ./build/android/${abidir}/* $out/jni/ echo '${androidManifest}' > $out/jni/AndroidManifest.xml cd $out && zip -r libwaku.aar * + '' else '' + mkdir -p $out/bin $out/include + + # Copy artifacts from build directory (created by Make during buildPhase) + # Note: build/ is in the source tree, not result/ (which is a post-build symlink) + if [ -d build ]; then + ${lib.optionalString copyLibwaku '' + cp build/libwaku.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true + ''} + + ${lib.optionalString copyLiblogosdelivery '' + cp build/liblogosdelivery.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true + ''} + + ${lib.optionalString copyWakunode2 '' + cp build/wakunode2 $out/bin/ 2>/dev/null || true + ''} + + ${lib.optionalString (!hasKnownInstallTarget) '' + cp build/lib*.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true + ''} + fi + + # Copy header files + ${lib.optionalString copyLibwaku '' + cp library/libwaku.h $out/include/ 2>/dev/null || true + ''} + + ${lib.optionalString copyLiblogosdelivery '' + cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true + ''} + + ${lib.optionalString (!hasKnownInstallTarget) '' + cp library/libwaku.h $out/include/ 2>/dev/null || true + cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true + ''} ''; meta = with pkgs.lib; { diff --git a/nix/nimble.nix b/nix/nimble.nix index 5bd7b0f32..337ecd672 100644 --- a/nix/nimble.nix +++ b/nix/nimble.nix @@ -6,7 +6,7 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "nimble"; - rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; + rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".*$" sourceFile; # WARNING: Requires manual 
updates when Nim compiler version changes. - hash = "sha256-MVHf19UbOWk8Zba2scj06PxdYYOJA6OXrVyDQ9Ku6Us="; -} \ No newline at end of file + hash = "sha256-8iutVgNzDtttZ7V+7S11KfLEuwhKA9TsgS51mlUI08k="; +} diff --git a/nix/pkgs/android-sdk/compose.nix b/nix/pkgs/android-sdk/compose.nix index c73aaee43..9a8536ddb 100644 --- a/nix/pkgs/android-sdk/compose.nix +++ b/nix/pkgs/android-sdk/compose.nix @@ -5,19 +5,16 @@ { androidenv, lib, stdenv }: -assert lib.assertMsg (stdenv.system != "aarch64-darwin") - "aarch64-darwin not supported for Android SDK. Use: NIXPKGS_SYSTEM_OVERRIDE=x86_64-darwin"; - # The "android-sdk-license" license is accepted # by setting android_sdk.accept_license = true. androidenv.composeAndroidPackages { cmdLineToolsVersion = "9.0"; toolsVersion = "26.1.1"; - platformToolsVersion = "33.0.3"; + platformToolsVersion = "34.0.5"; buildToolsVersions = [ "34.0.0" ]; platformVersions = [ "34" ]; cmakeVersions = [ "3.22.1" ]; - ndkVersion = "25.2.9519653"; + ndkVersion = "27.2.12479018"; includeNDK = true; includeExtras = [ "extras;android;m2repository" diff --git a/nix/sat.nix b/nix/sat.nix index 31f264468..92db58a2e 100644 --- a/nix/sat.nix +++ b/nix/sat.nix @@ -6,7 +6,8 @@ let in pkgs.fetchFromGitHub { owner = "nim-lang"; repo = "sat"; - rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile; + rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\".*$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. # WARNING: Requires manual updates when Nim compiler version changes. hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c="; -} \ No newline at end of file +} diff --git a/nix/shell.nix b/nix/shell.nix index 0db73dc25..3b83ac93d 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -1,23 +1,19 @@ -{ - pkgs ? 
import { }, -}: -let - optionalDarwinDeps = pkgs.lib.optionals pkgs.stdenv.isDarwin [ - pkgs.libiconv - pkgs.darwin.apple_sdk.frameworks.Security - ]; -in +{ pkgs }: + pkgs.mkShell { inputsFrom = [ pkgs.androidShell - ] ++ optionalDarwinDeps; + ] ++ pkgs.lib.optionals pkgs.stdenv.isDarwin [ + pkgs.libiconv + pkgs.darwin.apple_sdk.frameworks.Security + ]; buildInputs = with pkgs; [ git cargo rustup + rustc cmake - nim-unwrapped-2_0 + nim-unwrapped-2_2 ]; - } diff --git a/nix/submodules.json b/nix/submodules.json new file mode 100644 index 000000000..2f94e5f2b --- /dev/null +++ b/nix/submodules.json @@ -0,0 +1,247 @@ +[ + { + "path": "vendor/db_connector", + "url": "https://github.com/nim-lang/db_connector.git", + "rev": "74aef399e5c232f95c9fc5c987cebac846f09d62" + } + , + { + "path": "vendor/dnsclient.nim", + "url": "https://github.com/ba0f3/dnsclient.nim.git", + "rev": "23214235d4784d24aceed99bbfe153379ea557c8" + } + , + { + "path": "vendor/nim-bearssl", + "url": "https://github.com/status-im/nim-bearssl.git", + "rev": "11e798b62b8e6beabe958e048e9e24c7e0f9ee63" + } + , + { + "path": "vendor/nim-chronicles", + "url": "https://github.com/status-im/nim-chronicles.git", + "rev": "54f5b726025e8c7385e3a6529d3aa27454c6e6ff" + } + , + { + "path": "vendor/nim-chronos", + "url": "https://github.com/status-im/nim-chronos.git", + "rev": "85af4db764ecd3573c4704139560df3943216cf1" + } + , + { + "path": "vendor/nim-confutils", + "url": "https://github.com/status-im/nim-confutils.git", + "rev": "e214b3992a31acece6a9aada7d0a1ad37c928f3b" + } + , + { + "path": "vendor/nim-dnsdisc", + "url": "https://github.com/status-im/nim-dnsdisc.git", + "rev": "b71d029f4da4ec56974d54c04518bada00e1b623" + } + , + { + "path": "vendor/nim-eth", + "url": "https://github.com/status-im/nim-eth.git", + "rev": "d9135e6c3c5d6d819afdfb566aa8d958756b73a8" + } + , + { + "path": "vendor/nim-faststreams", + "url": "https://github.com/status-im/nim-faststreams.git", + "rev": 
"c3ac3f639ed1d62f59d3077d376a29c63ac9750c" + } + , + { + "path": "vendor/nim-ffi", + "url": "https://github.com/logos-messaging/nim-ffi", + "rev": "06111de155253b34e47ed2aaed1d61d08d62cc1b" + } + , + { + "path": "vendor/nim-http-utils", + "url": "https://github.com/status-im/nim-http-utils.git", + "rev": "79cbab1460f4c0cdde2084589d017c43a3d7b4f1" + } + , + { + "path": "vendor/nim-json-rpc", + "url": "https://github.com/status-im/nim-json-rpc.git", + "rev": "9665c265035f49f5ff94bbffdeadde68e19d6221" + } + , + { + "path": "vendor/nim-json-serialization", + "url": "https://github.com/status-im/nim-json-serialization.git", + "rev": "b65fd6a7e64c864dabe40e7dfd6c7d07db0014ac" + } + , + { + "path": "vendor/nim-jwt", + "url": "https://github.com/vacp2p/nim-jwt.git", + "rev": "18f8378de52b241f321c1f9ea905456e89b95c6f" + } + , + { + "path": "vendor/nim-libbacktrace", + "url": "https://github.com/status-im/nim-libbacktrace.git", + "rev": "d8bd4ce5c46bb6d2f984f6b3f3d7380897d95ecb" + } + , + { + "path": "vendor/nim-libp2p", + "url": "https://github.com/vacp2p/nim-libp2p.git", + "rev": "eb7e6ff89889e41b57515f891ba82986c54809fb" + } + , + { + "path": "vendor/nim-lsquic", + "url": "https://github.com/vacp2p/nim-lsquic", + "rev": "f3fe33462601ea34eb2e8e9c357c92e61f8d121b" + } + , + { + "path": "vendor/nim-metrics", + "url": "https://github.com/status-im/nim-metrics.git", + "rev": "ecf64c6078d1276d3b7d9b3d931fbdb70004db11" + } + , + { + "path": "vendor/nim-minilru", + "url": "https://github.com/status-im/nim-minilru.git", + "rev": "0c4b2bce959591f0a862e9b541ba43c6d0cf3476" + } + , + { + "path": "vendor/nim-nat-traversal", + "url": "https://github.com/status-im/nim-nat-traversal.git", + "rev": "860e18c37667b5dd005b94c63264560c35d88004" + } + , + { + "path": "vendor/nim-presto", + "url": "https://github.com/status-im/nim-presto.git", + "rev": "92b1c7ff141e6920e1f8a98a14c35c1fa098e3be" + } + , + { + "path": "vendor/nim-regex", + "url": "https://github.com/nitely/nim-regex.git", + 
"rev": "4593305ed1e49731fc75af1dc572dd2559aad19c" + } + , + { + "path": "vendor/nim-results", + "url": "https://github.com/arnetheduck/nim-results.git", + "rev": "df8113dda4c2d74d460a8fa98252b0b771bf1f27" + } + , + { + "path": "vendor/nim-secp256k1", + "url": "https://github.com/status-im/nim-secp256k1.git", + "rev": "9dd3df62124aae79d564da636bb22627c53c7676" + } + , + { + "path": "vendor/nim-serialization", + "url": "https://github.com/status-im/nim-serialization.git", + "rev": "6f525d5447d97256750ca7856faead03e562ed20" + } + , + { + "path": "vendor/nim-sqlite3-abi", + "url": "https://github.com/arnetheduck/nim-sqlite3-abi.git", + "rev": "bdf01cf4236fb40788f0733466cdf6708783cbac" + } + , + { + "path": "vendor/nim-stew", + "url": "https://github.com/status-im/nim-stew.git", + "rev": "e5740014961438610d336cd81706582dbf2c96f0" + } + , + { + "path": "vendor/nim-stint", + "url": "https://github.com/status-im/nim-stint.git", + "rev": "470b7892561b5179ab20bd389a69217d6213fe58" + } + , + { + "path": "vendor/nim-taskpools", + "url": "https://github.com/status-im/nim-taskpools.git", + "rev": "9e8ccc754631ac55ac2fd495e167e74e86293edb" + } + , + { + "path": "vendor/nim-testutils", + "url": "https://github.com/status-im/nim-testutils.git", + "rev": "94d68e796c045d5b37cabc6be32d7bfa168f8857" + } + , + { + "path": "vendor/nim-toml-serialization", + "url": "https://github.com/status-im/nim-toml-serialization.git", + "rev": "fea85b27f0badcf617033ca1bc05444b5fd8aa7a" + } + , + { + "path": "vendor/nim-unicodedb", + "url": "https://github.com/nitely/nim-unicodedb.git", + "rev": "66f2458710dc641dd4640368f9483c8a0ec70561" + } + , + { + "path": "vendor/nim-unittest2", + "url": "https://github.com/status-im/nim-unittest2.git", + "rev": "8b51e99b4a57fcfb31689230e75595f024543024" + } + , + { + "path": "vendor/nim-web3", + "url": "https://github.com/status-im/nim-web3.git", + "rev": "81ee8ce479d86acb73be7c4f365328e238d9b4a3" + } + , + { + "path": "vendor/nim-websock", + "url": 
"https://github.com/status-im/nim-websock.git", + "rev": "ebe308a79a7b440a11dfbe74f352be86a3883508" + } + , + { + "path": "vendor/nim-zlib", + "url": "https://github.com/status-im/nim-zlib.git", + "rev": "daa8723fd32299d4ca621c837430c29a5a11e19a" + } + , + { + "path": "vendor/nimbus-build-system", + "url": "https://github.com/status-im/nimbus-build-system.git", + "rev": "e6c2c9da39c2d368d9cf420ac22692e99715d22c" + } + , + { + "path": "vendor/nimcrypto", + "url": "https://github.com/cheatfate/nimcrypto.git", + "rev": "721fb99ee099b632eb86dfad1f0d96ee87583774" + } + , + { + "path": "vendor/nph", + "url": "https://github.com/arnetheduck/nph.git", + "rev": "c6e03162dc2820d3088660f644818d7040e95791" + } + , + { + "path": "vendor/waku-rlnv2-contract", + "url": "https://github.com/logos-messaging/waku-rlnv2-contract.git", + "rev": "8a338f354481e8a3f3d64a72e38fad4c62e32dcd" + } + , + { + "path": "vendor/zerokit", + "url": "https://github.com/vacp2p/zerokit.git", + "rev": "70c79fbc989d4f87d9352b2f4bddcb60ebe55b19" + } +] diff --git a/nix/zippy.nix b/nix/zippy.nix new file mode 100644 index 000000000..ec59dfc07 --- /dev/null +++ b/nix/zippy.nix @@ -0,0 +1,9 @@ +{ pkgs }: + +pkgs.fetchFromGitHub { + owner = "guzba"; + repo = "zippy"; + rev = "a99f6a7d8a8e3e0213b3cad0daf0ea974bf58e3f"; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-e2ma2Oyp0dlNx8pJsdZl5o5KnaoAX87tqfY0RLG3DZs="; +} \ No newline at end of file diff --git a/scripts/build_rln.sh b/scripts/build_rln.sh index 5e1b0caa5..b36ebe807 100755 --- a/scripts/build_rln.sh +++ b/scripts/build_rln.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash -# This script is used to build the rln library for the current platform, or download it from the -# release page if it is available. +# This script is used to build the rln library for the current platform. +# Previously downloaded prebuilt binaries, but due to compatibility issues +# we now always build from source. 
set -e @@ -14,41 +15,26 @@ output_filename=$3 [[ -z "${rln_version}" ]] && { echo "No rln version specified"; exit 1; } [[ -z "${output_filename}" ]] && { echo "No output filename specified"; exit 1; } -# Get the host triplet -host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}') +echo "Building RLN library from source (version ${rln_version})..." -tarball="${host_triplet}" +# Check if submodule version = version in Makefile +cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" -tarball+="-rln.tar.gz" - -# Download the prebuilt rln library if it is available -if curl --silent --fail-with-body -L \ - "https://github.com/vacp2p/zerokit/releases/download/$rln_version/$tarball" \ - -o "${tarball}"; -then - echo "Downloaded ${tarball}" - tar -xzf "${tarball}" - mv "release/librln.a" "${output_filename}" - rm -rf "${tarball}" release +detected_OS=$(uname -s) +if [[ "$detected_OS" == MINGW* || "$detected_OS" == MSYS* ]]; then + submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | sed -n 's/.*"name":"rln","version":"\([^"]*\)".*/\1/p') else - echo "Failed to download ${tarball}" - # Build rln instead - # first, check if submodule version = version in Makefile - cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" - - detected_OS=$(uname -s) - if [[ "$detected_OS" == MINGW* || "$detected_OS" == MSYS* ]]; then - submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | sed -n 's/.*"name":"rln","version":"\([^"]*\)".*/\1/p') - else - submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | jq -r '.packages[] | select(.name == "rln") | .version') - fi - - if [[ "v${submodule_version}" != "${rln_version}" ]]; then - echo "Submodule version (v${submodule_version}) does not match version in Makefile (${rln_version})" - echo 
"Please update the submodule to ${rln_version}" - exit 1 - fi - # if submodule version = version in Makefile, build rln - cargo build --release -p rln --manifest-path "${build_dir}/rln/Cargo.toml" - cp "${build_dir}/target/release/librln.a" "${output_filename}" + submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | jq -r '.packages[] | select(.name == "rln") | .version') fi + +if [[ "v${submodule_version}" != "${rln_version}" ]]; then + echo "Submodule version (v${submodule_version}) does not match version in Makefile (${rln_version})" + echo "Please update the submodule to ${rln_version}" + exit 1 +fi + +# Build rln from source +cargo build --release -p rln --manifest-path "${build_dir}/rln/Cargo.toml" +cp "${build_dir}/target/release/librln.a" "${output_filename}" + +echo "Successfully built ${output_filename}" diff --git a/scripts/build_rln_android.sh b/scripts/build_rln_android.sh index 93a8c47ff..15b81ce9c 100755 --- a/scripts/build_rln_android.sh +++ b/scripts/build_rln_android.sh @@ -25,4 +25,3 @@ cargo clean cross rustc --release --lib --target=${android_arch} --crate-type=cdylib cp ../target/${android_arch}/release/librln.so ${output_dir}/. popd - diff --git a/scripts/generate_nix_submodules.sh b/scripts/generate_nix_submodules.sh new file mode 100755 index 000000000..51073294c --- /dev/null +++ b/scripts/generate_nix_submodules.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# Generates nix/submodules.json from .gitmodules and git ls-tree. +# This allows Nix to fetch all git submodules without requiring +# locally initialized submodules or the '?submodules=1' URI flag. +# +# Usage: ./scripts/generate_nix_submodules.sh +# +# Run this script after: +# - Adding/removing submodules +# - Updating submodule commits (e.g. after 'make update') +# - Any change to .gitmodules +# +# Compatible with macOS bash 3.x (no associative arrays). + +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +OUTPUT="${REPO_ROOT}/nix/submodules.json" + +cd "$REPO_ROOT" + +TMP_URLS=$(mktemp) +TMP_REVS=$(mktemp) +trap 'rm -f "$TMP_URLS" "$TMP_REVS"' EXIT + +# Parse .gitmodules: extract (path, url) pairs +current_path="" +while IFS= read -r line; do + case "$line" in + *"path = "*) + current_path="${line#*path = }" + ;; + *"url = "*) + if [ -n "$current_path" ]; then + url="${line#*url = }" + url="${url%/}" + printf '%s\t%s\n' "$current_path" "$url" >> "$TMP_URLS" + current_path="" + fi + ;; + esac +done < .gitmodules + +# Get pinned commit hashes from git tree +git ls-tree HEAD vendor/ | while IFS= read -r tree_line; do + mode=$(echo "$tree_line" | awk '{print $1}') + type=$(echo "$tree_line" | awk '{print $2}') + hash=$(echo "$tree_line" | awk '{print $3}') + path=$(echo "$tree_line" | awk '{print $4}') + if [ "$type" = "commit" ]; then + path="${path%/}" + printf '%s\t%s\n' "$path" "$hash" >> "$TMP_REVS" + fi +done + +# Generate JSON by joining urls and revs on path +printf '[\n' > "$OUTPUT" +first=true + +sort "$TMP_URLS" | while IFS="$(printf '\t')" read -r path url; do + rev=$(grep "^${path} " "$TMP_REVS" | cut -f2 || true) + + if [ -z "$rev" ]; then + echo "WARNING: No commit hash found for submodule '$path', skipping" >&2 + continue + fi + + if [ "$first" = true ]; then + first=false + else + printf ' ,\n' >> "$OUTPUT" + fi + + printf ' {\n "path": "%s",\n "url": "%s",\n "rev": "%s"\n }\n' \ + "$path" "$url" "$rev" >> "$OUTPUT" +done + +printf ']\n' >> "$OUTPUT" + +count=$(grep -c '"path"' "$OUTPUT" || echo 0) +echo "Generated $OUTPUT with $count submodule entries" diff --git a/scripts/install_anvil.sh b/scripts/install_anvil.sh index 1bf4bd7b1..c573ac31c 100755 --- a/scripts/install_anvil.sh +++ b/scripts/install_anvil.sh @@ -2,14 +2,51 @@ # Install Anvil -if ! command -v anvil &> /dev/null; then +REQUIRED_FOUNDRY_VERSION="$1" + +if command -v anvil &> /dev/null; then + # Foundry is already installed; check the current version. 
+ CURRENT_FOUNDRY_VERSION=$(anvil --version 2>/dev/null | awk '{print $2}') + + if [ -n "$CURRENT_FOUNDRY_VERSION" ]; then + # Compare CURRENT_FOUNDRY_VERSION < REQUIRED_FOUNDRY_VERSION using sort -V + lower_version=$(printf '%s\n%s\n' "$CURRENT_FOUNDRY_VERSION" "$REQUIRED_FOUNDRY_VERSION" | sort -V | head -n1) + + if [ "$lower_version" != "$REQUIRED_FOUNDRY_VERSION" ]; then + echo "Anvil is already installed with version $CURRENT_FOUNDRY_VERSION, which is older than the required $REQUIRED_FOUNDRY_VERSION. Please update Foundry manually if needed." + fi + fi +else BASE_DIR="${XDG_CONFIG_HOME:-$HOME}" FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}" FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin" + echo "Installing Foundry..." curl -L https://foundry.paradigm.xyz | bash - # Extract the source path from the download result - echo "foundryup_path: $FOUNDRY_BIN_DIR" - # run foundryup - $FOUNDRY_BIN_DIR/foundryup + + # Add Foundry to PATH for this script session + export PATH="$FOUNDRY_BIN_DIR:$PATH" + + # Verify foundryup is available + if ! command -v foundryup >/dev/null 2>&1; then + echo "Error: foundryup installation failed or not found in $FOUNDRY_BIN_DIR" + exit 1 + fi + + # Run foundryup to install the required version + if [ -n "$REQUIRED_FOUNDRY_VERSION" ]; then + echo "Installing Foundry tools version $REQUIRED_FOUNDRY_VERSION..." + foundryup --install "$REQUIRED_FOUNDRY_VERSION" + else + echo "Installing latest Foundry tools..." + foundryup + fi + + # Verify anvil was installed + if ! 
command -v anvil >/dev/null 2>&1; then + echo "Error: anvil installation failed" + exit 1 + fi + + echo "Anvil successfully installed: $(anvil --version)" fi \ No newline at end of file diff --git a/scripts/install_nasm_in_windows.sh b/scripts/install_nasm_in_windows.sh new file mode 100644 index 000000000..2bba5ecd4 --- /dev/null +++ b/scripts/install_nasm_in_windows.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env sh +set -e + +NASM_VERSION="2.16.01" +NASM_ZIP="nasm-${NASM_VERSION}-win64.zip" +NASM_URL="https://www.nasm.us/pub/nasm/releasebuilds/${NASM_VERSION}/win64/${NASM_ZIP}" + +INSTALL_DIR="$HOME/.local/nasm" +BIN_DIR="$INSTALL_DIR/bin" + +echo "Installing NASM ${NASM_VERSION}..." + +# Create directories +mkdir -p "$BIN_DIR" +cd "$INSTALL_DIR" + +# Download +if [ ! -f "$NASM_ZIP" ]; then + echo "Downloading NASM..." + curl -LO "$NASM_URL" +fi + +# Extract +echo "Extracting..." +unzip -o "$NASM_ZIP" + +# Move binaries +cp nasm-*/nasm.exe "$BIN_DIR/" +cp nasm-*/ndisasm.exe "$BIN_DIR/" + +# Add to PATH in bashrc (idempotent) +if ! grep -q 'nasm/bin' "$HOME/.bashrc"; then + echo '' >> "$HOME/.bashrc" + echo '# NASM' >> "$HOME/.bashrc" + echo 'export PATH="$HOME/.local/nasm/bin:$PATH"' >> "$HOME/.bashrc" +fi + diff --git a/scripts/install_pnpm.sh b/scripts/install_pnpm.sh index 34ba47b07..fcfc82ccd 100755 --- a/scripts/install_pnpm.sh +++ b/scripts/install_pnpm.sh @@ -1,8 +1,37 @@ #!/usr/bin/env bash # Install pnpm -if ! command -v pnpm &> /dev/null; then - echo "pnpm is not installed, installing it now..." - npm i pnpm --global + +REQUIRED_PNPM_VERSION="$1" + +if command -v pnpm &> /dev/null; then + # pnpm is already installed; check the current version. 
+ CURRENT_PNPM_VERSION=$(pnpm --version 2>/dev/null) + + if [ -n "$CURRENT_PNPM_VERSION" ]; then + # Compare CURRENT_PNPM_VERSION < REQUIRED_PNPM_VERSION using sort -V + lower_version=$(printf '%s\n%s\n' "$CURRENT_PNPM_VERSION" "$REQUIRED_PNPM_VERSION" | sort -V | head -n1) + + if [ "$lower_version" != "$REQUIRED_PNPM_VERSION" ]; then + echo "pnpm is already installed with version $CURRENT_PNPM_VERSION, which is older than the required $REQUIRED_PNPM_VERSION. Please update pnpm manually if needed." + fi + fi +else + # Install pnpm using npm + if [ -n "$REQUIRED_PNPM_VERSION" ]; then + echo "Installing pnpm version $REQUIRED_PNPM_VERSION..." + npm install -g pnpm@$REQUIRED_PNPM_VERSION + else + echo "Installing latest pnpm..." + npm install -g pnpm + fi + + # Verify pnpm was installed + if ! command -v pnpm >/dev/null 2>&1; then + echo "Error: pnpm installation failed" + exit 1 + fi + + echo "pnpm successfully installed: $(pnpm --version)" fi diff --git a/scripts/install_rln_tests_dependencies.sh b/scripts/install_rln_tests_dependencies.sh index e19e0ef3c..c8c083b54 100755 --- a/scripts/install_rln_tests_dependencies.sh +++ b/scripts/install_rln_tests_dependencies.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash # Install Anvil -./scripts/install_anvil.sh +FOUNDRY_VERSION="$1" +./scripts/install_anvil.sh "$FOUNDRY_VERSION" -#Install pnpm -./scripts/install_pnpm.sh \ No newline at end of file +# Install pnpm +PNPM_VERSION="$2" +./scripts/install_pnpm.sh "$PNPM_VERSION" \ No newline at end of file diff --git a/scripts/libwaku_windows_setup.mk b/scripts/libwaku_windows_setup.mk new file mode 100644 index 000000000..503d0c405 --- /dev/null +++ b/scripts/libwaku_windows_setup.mk @@ -0,0 +1,53 @@ +# --------------------------------------------------------- +# Windows Setup Makefile +# --------------------------------------------------------- + +# Extend PATH (Make preserves environment variables) +export PATH := 
/c/msys64/usr/bin:/c/msys64/mingw64/bin:/c/msys64/usr/lib:/c/msys64/mingw64/lib:$(PATH) + +# Tools required +DEPS = gcc g++ make cmake cargo upx rustc python + +# Default target +.PHONY: windows-setup +windows-setup: check-deps update-submodules create-tmp libunwind miniupnpc libnatpmp + @echo "Windows setup completed successfully!" + +.PHONY: check-deps +check-deps: + @echo "Checking libwaku build dependencies..." + @for dep in $(DEPS); do \ + if ! which $$dep >/dev/null 2>&1; then \ + echo "✗ Missing dependency: $$dep"; \ + exit 1; \ + else \ + echo "✓ Found: $$dep"; \ + fi; \ + done + +.PHONY: update-submodules +update-submodules: + @echo "Updating libwaku git submodules..." + git submodule update --init --recursive + +.PHONY: create-tmp +create-tmp: + @echo "Creating tmp directory..." + mkdir -p tmp + +.PHONY: libunwind +libunwind: + @echo "Building libunwind..." + cd vendor/nim-libbacktrace && make all V=1 + +.PHONY: miniupnpc +miniupnpc: + @echo "Building miniupnpc..." + cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc && \ + make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 + +.PHONY: libnatpmp +libnatpmp: + @echo "Building libnatpmp..." + cd vendor/nim-nat-traversal/vendor/libnatpmp-upstream && \ + make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 diff --git a/scripts/regenerate_anvil_state.sh b/scripts/regenerate_anvil_state.sh new file mode 100755 index 000000000..9474591d9 --- /dev/null +++ b/scripts/regenerate_anvil_state.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +# Simple script to regenerate the Anvil state file +# This creates a state file compatible with the current Foundry version + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +STATE_DIR="$PROJECT_ROOT/tests/waku_rln_relay/anvil_state" +STATE_FILE="$STATE_DIR/state-deployed-contracts-mint-and-approved.json" +STATE_FILE_GZ="${STATE_FILE}.gz" + +echo "===================================" +echo "Anvil State File Regeneration Tool" +echo "===================================" +echo "" + +# Check if Foundry is installed +if ! command -v anvil &> /dev/null; then + echo "ERROR: anvil is not installed!" + echo "Please run: make rln-deps" + exit 1 +fi + +ANVIL_VERSION=$(anvil --version 2>/dev/null | head -n1) +echo "Using Foundry: $ANVIL_VERSION" +echo "" + +# Backup existing state file +if [ -f "$STATE_FILE_GZ" ]; then + BACKUP_FILE="${STATE_FILE_GZ}.backup-$(date +%Y%m%d-%H%M%S)" + echo "Backing up existing state file to: $(basename $BACKUP_FILE)" + cp "$STATE_FILE_GZ" "$BACKUP_FILE" +fi + +# Remove old state files +rm -f "$STATE_FILE" "$STATE_FILE_GZ" + +echo "" +echo "Running test to generate fresh state file..." +echo "This will:" +echo " 1. Build RLN library" +echo " 2. Start Anvil with state dump enabled" +echo " 3. Deploy contracts" +echo " 4. Save state and compress it" +echo "" + +cd "$PROJECT_ROOT" + +# Run a single test that deploys contracts +# The test framework will handle state dump +make test tests/waku_rln_relay/test_rln_group_manager_onchain.nim "RLN instances" || { + echo "" + echo "Test execution completed (exit status: $?)" + echo "Checking if state file was generated..." +} + +# Check if state file was created +if [ -f "$STATE_FILE" ]; then + echo "" + echo "✓ State file generated: $STATE_FILE" + + # Compress it + gzip -c "$STATE_FILE" > "$STATE_FILE_GZ" + echo "✓ Compressed: $STATE_FILE_GZ" + + # File sizes + STATE_SIZE=$(du -h "$STATE_FILE" | cut -f1) + GZ_SIZE=$(du -h "$STATE_FILE_GZ" | cut -f1) + echo "" + echo "File sizes:" + echo " Uncompressed: $STATE_SIZE" + echo " Compressed: $GZ_SIZE" + + # Optionally remove uncompressed + echo "" + read -p "Remove uncompressed state file? 
[y/N] " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + rm "$STATE_FILE" + echo "✓ Removed uncompressed file" + fi + + echo "" + echo "============================================" + echo "✓ SUCCESS! State file regenerated" + echo "============================================" + echo "" + echo "Next steps:" + echo " 1. Test locally: make test tests/node/test_wakunode_lightpush.nim" + echo " 2. If tests pass, commit: git add $STATE_FILE_GZ" + echo " 3. Push and verify CI passes" + echo "" +else + echo "" + echo "============================================" + echo "✗ ERROR: State file was not generated" + echo "============================================" + echo "" + echo "The state file should have been created at: $STATE_FILE" + echo "Please check the test output above for errors." + exit 1 +fi diff --git a/simulations/mixnet/config.toml b/simulations/mixnet/config.toml index 17e9242d3..5cd1aa936 100644 --- a/simulations/mixnet/config.toml +++ b/simulations/mixnet/config.toml @@ -1,16 +1,17 @@ -log-level = "INFO" +log-level = "TRACE" relay = true -#mix = true +mix = true filter = true -store = false +store = true lightpush = true max-connections = 150 -peer-exchange = true +peer-exchange = false metrics-logging = false cluster-id = 2 -discv5-discovery = true +discv5-discovery = false discv5-udp-port = 9000 discv5-enr-auto-update = true +enable-kad-discovery = true rest = true rest-admin = true ports-shift = 1 @@ -18,8 +19,10 @@ num-shards-in-network = 1 shard = [0] agent-string = "nwaku-mix" nodekey = "f98e3fba96c32e8d1967d460f1b79457380e1a895f7971cecc8528abe733781a" -#mixkey = "a87db88246ec0eedda347b9b643864bee3d6933eb15ba41e6d58cb678d813258" -rendezvous = true +mixkey = "a87db88246ec0eedda347b9b643864bee3d6933eb15ba41e6d58cb678d813258" +rendezvous = false listen-address = "127.0.0.1" nat = "extip:127.0.0.1" +ext-multiaddr = ["/ip4/127.0.0.1/tcp/60001"] +ext-multiaddr-only = true ip-colocation-limit=0 diff --git a/simulations/mixnet/config1.toml 
b/simulations/mixnet/config1.toml index e06a527c1..73cccb8c6 100644 --- a/simulations/mixnet/config1.toml +++ b/simulations/mixnet/config1.toml @@ -1,17 +1,18 @@ -log-level = "INFO" +log-level = "TRACE" relay = true mix = true filter = true store = false lightpush = true max-connections = 150 -peer-exchange = true +peer-exchange = false metrics-logging = false cluster-id = 2 -discv5-discovery = true +discv5-discovery = false discv5-udp-port = 9001 discv5-enr-auto-update = true discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"] rest = true rest-admin = true ports-shift = 2 @@ -20,8 +21,10 @@ shard = [0] agent-string = "nwaku-mix" nodekey = "09e9d134331953357bd38bbfce8edb377f4b6308b4f3bfbe85c610497053d684" mixkey = "c86029e02c05a7e25182974b519d0d52fcbafeca6fe191fbb64857fb05be1a53" -rendezvous = true +rendezvous = false listen-address = "127.0.0.1" nat = "extip:127.0.0.1" +ext-multiaddr = ["/ip4/127.0.0.1/tcp/60002"] +ext-multiaddr-only = true ip-colocation-limit=0 #staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/simulations/mixnet/config2.toml b/simulations/mixnet/config2.toml index 93822603b..c40e41103 100644 --- a/simulations/mixnet/config2.toml +++ b/simulations/mixnet/config2.toml @@ -1,17 +1,18 @@ -log-level = "INFO" +log-level = "TRACE" relay = true mix = true filter = true store = false lightpush = true max-connections = 
150 -peer-exchange = true +peer-exchange = false metrics-logging = false cluster-id = 2 -discv5-discovery = true +discv5-discovery = false discv5-udp-port = 9002 discv5-enr-auto-update = true discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"] rest = false rest-admin = false ports-shift = 3 @@ -20,8 +21,10 @@ shard = [0] agent-string = "nwaku-mix" nodekey = "ed54db994682e857d77cd6fb81be697382dc43aa5cd78e16b0ec8098549f860e" mixkey = "b858ac16bbb551c4b2973313b1c8c8f7ea469fca03f1608d200bbf58d388ec7f" -rendezvous = true +rendezvous = false listen-address = "127.0.0.1" nat = "extip:127.0.0.1" +ext-multiaddr = ["/ip4/127.0.0.1/tcp/60003"] +ext-multiaddr-only = true ip-colocation-limit=0 #staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/simulations/mixnet/config3.toml b/simulations/mixnet/config3.toml index 6f339dfff..80c19b34b 100644 --- a/simulations/mixnet/config3.toml +++ b/simulations/mixnet/config3.toml @@ -1,17 +1,18 @@ -log-level = "INFO" +log-level = "TRACE" relay = true mix = true filter = true store = false lightpush = true max-connections = 150 -peer-exchange = true +peer-exchange = false metrics-logging = false cluster-id = 2 -discv5-discovery = true +discv5-discovery = false discv5-udp-port = 9003 discv5-enr-auto-update = true discv5-bootstrap-node = 
["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"] rest = false rest-admin = false ports-shift = 4 @@ -20,8 +21,10 @@ shard = [0] agent-string = "nwaku-mix" nodekey = "42f96f29f2d6670938b0864aced65a332dcf5774103b4c44ec4d0ea4ef3c47d6" mixkey = "d8bd379bb394b0f22dd236d63af9f1a9bc45266beffc3fbbe19e8b6575f2535b" -rendezvous = true +rendezvous = false listen-address = "127.0.0.1" nat = "extip:127.0.0.1" +ext-multiaddr = ["/ip4/127.0.0.1/tcp/60004"] +ext-multiaddr-only = true ip-colocation-limit=0 #staticnode = ["/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/simulations/mixnet/config4.toml b/simulations/mixnet/config4.toml index 23115ac03..ed5b2dad0 100644 --- a/simulations/mixnet/config4.toml +++ b/simulations/mixnet/config4.toml @@ -1,17 +1,18 @@ -log-level = "INFO" +log-level = "TRACE" relay = true mix = true filter = true store = false lightpush = true max-connections = 150 -peer-exchange = true +peer-exchange = false metrics-logging = false cluster-id = 2 -discv5-discovery = true +discv5-discovery = false discv5-udp-port = 9004 discv5-enr-auto-update = true discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +kad-bootstrap-node = 
["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"] rest = false rest-admin = false ports-shift = 5 @@ -20,8 +21,10 @@ shard = [0] agent-string = "nwaku-mix" nodekey = "3ce887b3c34b7a92dd2868af33941ed1dbec4893b054572cd5078da09dd923d4" mixkey = "780fff09e51e98df574e266bf3266ec6a3a1ddfcf7da826a349a29c137009d49" -rendezvous = true +rendezvous = false listen-address = "127.0.0.1" nat = "extip:127.0.0.1" +ext-multiaddr = ["/ip4/127.0.0.1/tcp/60005"] +ext-multiaddr-only = true ip-colocation-limit=0 #staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF"] diff --git a/simulations/mixnet/run_chat_mix.sh b/simulations/mixnet/run_chat_mix.sh index 11a28c06b..f711c055e 100755 --- a/simulations/mixnet/run_chat_mix.sh +++ b/simulations/mixnet/run_chat_mix.sh @@ -1 +1,2 @@ -../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" +../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 
--shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --kad-bootstrap-node="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" +#--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" diff --git a/simulations/mixnet/run_chat_mix1.sh b/simulations/mixnet/run_chat_mix1.sh index 11a28c06b..7323bb3a9 100755 --- a/simulations/mixnet/run_chat_mix1.sh +++ b/simulations/mixnet/run_chat_mix1.sh @@ -1 +1,2 @@ -../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" +../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 
--servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE +#--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" diff --git a/simulations/mixnet/run_lp_service_node.sh b/simulations/mixnet/run_lp_service_node.sh deleted file mode 100755 index 1d005796e..000000000 --- a/simulations/mixnet/run_lp_service_node.sh +++ /dev/null @@ -1 +0,0 @@ -../../build/wakunode2 --config-file="config.toml" diff --git a/simulations/mixnet/run_mix_node.sh b/simulations/mixnet/run_mix_node.sh new file mode 100755 index 000000000..2b293540c --- /dev/null +++ b/simulations/mixnet/run_mix_node.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config.toml" 2>&1 | tee mix_node.log diff --git a/simulations/mixnet/run_mix_node1.sh b/simulations/mixnet/run_mix_node1.sh index 024eb3f99..617312122 100755 --- a/simulations/mixnet/run_mix_node1.sh +++ b/simulations/mixnet/run_mix_node1.sh @@ -1 +1 @@ -../../build/wakunode2 --config-file="config1.toml" +../../build/wakunode2 --config-file="config1.toml" 2>&1 | tee mix_node1.log diff --git a/simulations/mixnet/run_mix_node2.sh b/simulations/mixnet/run_mix_node2.sh index e55a9bac8..5fc2ef498 100755 --- a/simulations/mixnet/run_mix_node2.sh +++ b/simulations/mixnet/run_mix_node2.sh @@ -1 +1 @@ -../../build/wakunode2 --config-file="config2.toml" +../../build/wakunode2 --config-file="config2.toml" 2>&1 | tee 
mix_node2.log diff --git a/simulations/mixnet/run_mix_node3.sh b/simulations/mixnet/run_mix_node3.sh index dca8119a3..d77d04c02 100755 --- a/simulations/mixnet/run_mix_node3.sh +++ b/simulations/mixnet/run_mix_node3.sh @@ -1 +1 @@ -../../build/wakunode2 --config-file="config3.toml" +../../build/wakunode2 --config-file="config3.toml" 2>&1 | tee mix_node3.log diff --git a/simulations/mixnet/run_mix_node4.sh b/simulations/mixnet/run_mix_node4.sh index 9cf25158b..3a2b0299d 100755 --- a/simulations/mixnet/run_mix_node4.sh +++ b/simulations/mixnet/run_mix_node4.sh @@ -1 +1 @@ -../../build/wakunode2 --config-file="config4.toml" +../../build/wakunode2 --config-file="config4.toml" 2>&1 | tee mix_node4.log diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index 3d22cd9c2..4d4225f9f 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -89,6 +89,7 @@ import ./test_waku_netconfig, ./test_waku_switch, ./test_waku_rendezvous, + ./test_waku_metadata, ./waku_discv5/test_waku_discv5 # Waku Keystore test suite diff --git a/tests/api/test_all.nim b/tests/api/test_all.nim index 99c1b3b4c..4617c8cdb 100644 --- a/tests/api/test_all.nim +++ b/tests/api/test_all.nim @@ -1,3 +1,8 @@ {.used.} -import ./test_entry_nodes, ./test_node_conf +import + ./test_entry_nodes, + ./test_node_conf, + ./test_api_send, + ./test_api_subscription, + ./test_api_health diff --git a/tests/api/test_api_health.nim b/tests/api/test_api_health.nim new file mode 100644 index 000000000..f3dd340af --- /dev/null +++ b/tests/api/test_api_health.nim @@ -0,0 +1,294 @@ +{.used.} + +import std/[options, sequtils, times] +import chronos, testutils/unittests, stew/byteutils, libp2p/[switch, peerinfo] +import ../testlib/[common, wakucore, wakunode, testasync] + +import + waku, + waku/[waku_node, waku_core, waku_relay/protocol, common/broker/broker_context], + waku/node/health_monitor/[topic_health, health_status, protocol_health, health_report], + waku/requests/health_requests, + 
waku/requests/node_requests, + waku/events/health_events, + waku/common/waku_protocol, + waku/factory/waku_conf +import tools/confutils/cli_args + +const TestTimeout = chronos.seconds(10) +const DefaultShard = PubsubTopic("/waku/2/rs/3/0") +const TestContentTopic = ContentTopic("/waku/2/default-content/proto") + +proc dummyHandler( + topic: PubsubTopic, msg: WakuMessage +): Future[void] {.async, gcsafe.} = + discard + +proc waitForConnectionStatus( + brokerCtx: BrokerContext, expected: ConnectionStatus +) {.async.} = + var future = newFuture[void]("waitForConnectionStatus") + + let handler: EventConnectionStatusChangeListenerProc = proc( + e: EventConnectionStatusChange + ) {.async: (raises: []), gcsafe.} = + if not future.finished: + if e.connectionStatus == expected: + future.complete() + + let handle = EventConnectionStatusChange.listen(brokerCtx, handler).valueOr: + raiseAssert error + + try: + if not await future.withTimeout(TestTimeout): + raiseAssert "Timeout waiting for status: " & $expected + finally: + EventConnectionStatusChange.dropListener(brokerCtx, handle) + +proc waitForShardHealthy( + brokerCtx: BrokerContext +): Future[EventShardTopicHealthChange] {.async.} = + var future = newFuture[EventShardTopicHealthChange]("waitForShardHealthy") + + let handler: EventShardTopicHealthChangeListenerProc = proc( + e: EventShardTopicHealthChange + ) {.async: (raises: []), gcsafe.} = + if not future.finished: + if e.health == TopicHealth.MINIMALLY_HEALTHY or + e.health == TopicHealth.SUFFICIENTLY_HEALTHY: + future.complete(e) + + let handle = EventShardTopicHealthChange.listen(brokerCtx, handler).valueOr: + raiseAssert error + + try: + if await future.withTimeout(TestTimeout): + return future.read() + else: + raiseAssert "Timeout waiting for shard health event" + finally: + EventShardTopicHealthChange.dropListener(brokerCtx, handle) + +suite "LM API health checking": + var + serviceNode {.threadvar.}: WakuNode + client {.threadvar.}: Waku + servicePeerInfo 
{.threadvar.}: RemotePeerInfo + + asyncSetup: + lockNewGlobalBrokerContext: + serviceNode = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + (await serviceNode.mountRelay()).isOkOr: + raiseAssert error + serviceNode.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert error + await serviceNode.mountLibp2pPing() + await serviceNode.start() + + servicePeerInfo = serviceNode.peerInfo.toRemotePeerInfo() + serviceNode.wakuRelay.subscribe(DefaultShard, dummyHandler) + + lockNewGlobalBrokerContext: + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = 1 + conf.rest = false + + client = (await createNode(conf)).valueOr: + raiseAssert error + (await startWaku(addr client)).isOkOr: + raiseAssert error + + asyncTeardown: + discard await client.stop() + await serviceNode.stop() + + asyncTest "RequestShardTopicsHealth, check PubsubTopic health": + client.node.wakuRelay.subscribe(DefaultShard, dummyHandler) + await client.node.connectToNodes(@[servicePeerInfo]) + + var isHealthy = false + let start = Moment.now() + while Moment.now() - start < TestTimeout: + let req = RequestShardTopicsHealth.request(client.brokerCtx, @[DefaultShard]).valueOr: + raiseAssert "RequestShardTopicsHealth failed" + + if req.topicHealth.len > 0: + let h = req.topicHealth[0].health + if h == TopicHealth.MINIMALLY_HEALTHY or h == TopicHealth.SUFFICIENTLY_HEALTHY: + isHealthy = true + break + await sleepAsync(chronos.milliseconds(100)) + + check isHealthy == true + + asyncTest "RequestShardTopicsHealth, check disconnected PubsubTopic": + const GhostShard = PubsubTopic("/waku/2/rs/1/666") + client.node.wakuRelay.subscribe(GhostShard, dummyHandler) + + let req = RequestShardTopicsHealth.request(client.brokerCtx, @[GhostShard]).valueOr: + raiseAssert "Request failed" + + check req.topicHealth.len > 
0 + check req.topicHealth[0].health == TopicHealth.UNHEALTHY + + asyncTest "RequestProtocolHealth, check relay status": + await client.node.connectToNodes(@[servicePeerInfo]) + + var isReady = false + let start = Moment.now() + while Moment.now() - start < TestTimeout: + let relayReq = await RequestProtocolHealth.request( + client.brokerCtx, WakuProtocol.RelayProtocol + ) + if relayReq.isOk() and relayReq.get().healthStatus.health == HealthStatus.READY: + isReady = true + break + await sleepAsync(chronos.milliseconds(100)) + + check isReady == true + + let storeReq = + await RequestProtocolHealth.request(client.brokerCtx, WakuProtocol.StoreProtocol) + if storeReq.isOk(): + check storeReq.get().healthStatus.health != HealthStatus.READY + + asyncTest "RequestProtocolHealth, check unmounted protocol": + let req = + await RequestProtocolHealth.request(client.brokerCtx, WakuProtocol.StoreProtocol) + check req.isOk() + + let status = req.get().healthStatus + check status.health == HealthStatus.NOT_MOUNTED + check status.desc.isNone() + + asyncTest "RequestConnectionStatus, check connectivity state": + let initialReq = RequestConnectionStatus.request(client.brokerCtx).valueOr: + raiseAssert "RequestConnectionStatus failed" + check initialReq.connectionStatus == ConnectionStatus.Disconnected + + await client.node.connectToNodes(@[servicePeerInfo]) + + var isConnected = false + let start = Moment.now() + while Moment.now() - start < TestTimeout: + let req = RequestConnectionStatus.request(client.brokerCtx).valueOr: + raiseAssert "RequestConnectionStatus failed" + + if req.connectionStatus == ConnectionStatus.PartiallyConnected or + req.connectionStatus == ConnectionStatus.Connected: + isConnected = true + break + await sleepAsync(chronos.milliseconds(100)) + + check isConnected == true + + asyncTest "EventConnectionStatusChange, detect connect and disconnect": + let connectFuture = + waitForConnectionStatus(client.brokerCtx, ConnectionStatus.PartiallyConnected) + + await 
client.node.connectToNodes(@[servicePeerInfo]) + await connectFuture + + let disconnectFuture = + waitForConnectionStatus(client.brokerCtx, ConnectionStatus.Disconnected) + await client.node.disconnectNode(servicePeerInfo) + await disconnectFuture + + asyncTest "EventShardTopicHealthChange, detect health improvement": + client.node.wakuRelay.subscribe(DefaultShard, dummyHandler) + + let healthEventFuture = waitForShardHealthy(client.brokerCtx) + + await client.node.connectToNodes(@[servicePeerInfo]) + + let event = await healthEventFuture + check event.topic == DefaultShard + + asyncTest "RequestHealthReport, check aggregate report": + let req = await RequestHealthReport.request(client.brokerCtx) + + check req.isOk() + + let report = req.get().healthReport + check report.nodeHealth == HealthStatus.READY + check report.protocolsHealth.len > 0 + check report.protocolsHealth.anyIt(it.protocol == $WakuProtocol.RelayProtocol) + + asyncTest "RequestContentTopicsHealth, smoke test": + let fictionalTopic = ContentTopic("/waku/2/this-does-not-exist/proto") + + let req = RequestContentTopicsHealth.request(client.brokerCtx, @[fictionalTopic]) + + check req.isOk() + + let res = req.get() + check res.contentTopicHealth.len == 1 + check res.contentTopicHealth[0].topic == fictionalTopic + check res.contentTopicHealth[0].health == TopicHealth.NOT_SUBSCRIBED + + asyncTest "RequestContentTopicsHealth, core mode trivial 1-shard autosharding": + let cTopic = ContentTopic("/waku/2/my-content-topic/proto") + + let shardReq = + RequestRelayShard.request(client.brokerCtx, none(PubsubTopic), cTopic) + + check shardReq.isOk() + let targetShard = $shardReq.get().relayShard + + client.node.wakuRelay.subscribe(targetShard, dummyHandler) + serviceNode.wakuRelay.subscribe(targetShard, dummyHandler) + + await client.node.connectToNodes(@[servicePeerInfo]) + + var isHealthy = false + let start = Moment.now() + while Moment.now() - start < TestTimeout: + let req = 
RequestContentTopicsHealth.request(client.brokerCtx, @[cTopic]).valueOr: + raiseAssert "Request failed" + + if req.contentTopicHealth.len > 0: + let h = req.contentTopicHealth[0].health + if h == TopicHealth.MINIMALLY_HEALTHY or h == TopicHealth.SUFFICIENTLY_HEALTHY: + isHealthy = true + break + + await sleepAsync(chronos.milliseconds(100)) + + check isHealthy == true + + asyncTest "RequestProtocolHealth, edge mode smoke test": + var edgeWaku: Waku + + lockNewGlobalBrokerContext: + var edgeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + edgeConf.mode = Edge + edgeConf.listenAddress = parseIpAddress("0.0.0.0") + edgeConf.tcpPort = Port(0) + edgeConf.discv5UdpPort = Port(0) + edgeConf.clusterId = 3'u16 + edgeConf.maxMessageSize = "150 KiB" + edgeConf.rest = false + + edgeWaku = (await createNode(edgeConf)).valueOr: + raiseAssert "Failed to create edge node: " & error + + (await startWaku(addr edgeWaku)).isOkOr: + raiseAssert "Failed to start edge waku: " & error + + let relayReq = await RequestProtocolHealth.request( + edgeWaku.brokerCtx, WakuProtocol.RelayProtocol + ) + check relayReq.isOk() + check relayReq.get().healthStatus.health == HealthStatus.NOT_MOUNTED + + check not edgeWaku.node.wakuFilterClient.isNil() + + discard await edgeWaku.stop() diff --git a/tests/api/test_api_send.nim b/tests/api/test_api_send.nim new file mode 100644 index 000000000..30a176119 --- /dev/null +++ b/tests/api/test_api_send.nim @@ -0,0 +1,435 @@ +{.used.} + +import std/strutils +import chronos, testutils/unittests, stew/byteutils, libp2p/[switch, peerinfo] +import ../testlib/[common, wakucore, wakunode, testasync] +import ../waku_archive/archive_utils +import + waku, waku/[waku_node, waku_core, waku_relay/protocol, common/broker/broker_context] +import waku/factory/waku_conf +import tools/confutils/cli_args + +type SendEventOutcome {.pure.} = enum + Sent + Propagated + Error + +type SendEventListenerManager = ref object + brokerCtx: BrokerContext + sentListener: 
MessageSentEventListener + errorListener: MessageErrorEventListener + propagatedListener: MessagePropagatedEventListener + sentFuture: Future[void] + errorFuture: Future[void] + propagatedFuture: Future[void] + sentCount: int + errorCount: int + propagatedCount: int + sentRequestIds: seq[RequestId] + errorRequestIds: seq[RequestId] + propagatedRequestIds: seq[RequestId] + +proc newSendEventListenerManager(brokerCtx: BrokerContext): SendEventListenerManager = + let manager = SendEventListenerManager(brokerCtx: brokerCtx) + manager.sentFuture = newFuture[void]("sentEvent") + manager.errorFuture = newFuture[void]("errorEvent") + manager.propagatedFuture = newFuture[void]("propagatedEvent") + + manager.sentListener = MessageSentEvent.listen( + brokerCtx, + proc(event: MessageSentEvent) {.async: (raises: []).} = + inc manager.sentCount + manager.sentRequestIds.add(event.requestId) + echo "SENT EVENT TRIGGERED (#", + manager.sentCount, "): requestId=", event.requestId + if not manager.sentFuture.finished(): + manager.sentFuture.complete() + , + ).valueOr: + raiseAssert error + + manager.errorListener = MessageErrorEvent.listen( + brokerCtx, + proc(event: MessageErrorEvent) {.async: (raises: []).} = + inc manager.errorCount + manager.errorRequestIds.add(event.requestId) + echo "ERROR EVENT TRIGGERED (#", manager.errorCount, "): ", event.error + if not manager.errorFuture.finished(): + manager.errorFuture.fail( + newException(CatchableError, "Error event triggered: " & event.error) + ) + , + ).valueOr: + raiseAssert error + + manager.propagatedListener = MessagePropagatedEvent.listen( + brokerCtx, + proc(event: MessagePropagatedEvent) {.async: (raises: []).} = + inc manager.propagatedCount + manager.propagatedRequestIds.add(event.requestId) + echo "PROPAGATED EVENT TRIGGERED (#", + manager.propagatedCount, "): requestId=", event.requestId + if not manager.propagatedFuture.finished(): + manager.propagatedFuture.complete() + , + ).valueOr: + raiseAssert error + + return 
manager + +proc teardown(manager: SendEventListenerManager) = + MessageSentEvent.dropListener(manager.brokerCtx, manager.sentListener) + MessageErrorEvent.dropListener(manager.brokerCtx, manager.errorListener) + MessagePropagatedEvent.dropListener(manager.brokerCtx, manager.propagatedListener) + +proc waitForEvents( + manager: SendEventListenerManager, timeout: Duration +): Future[bool] {.async.} = + return await allFutures( + manager.sentFuture, manager.propagatedFuture, manager.errorFuture + ) + .withTimeout(timeout) + +proc outcomes(manager: SendEventListenerManager): set[SendEventOutcome] = + if manager.sentFuture.completed(): + result.incl(SendEventOutcome.Sent) + if manager.propagatedFuture.completed(): + result.incl(SendEventOutcome.Propagated) + if manager.errorFuture.failed(): + result.incl(SendEventOutcome.Error) + +proc validate(manager: SendEventListenerManager, expected: set[SendEventOutcome]) = + echo "EVENT COUNTS: sent=", + manager.sentCount, ", propagated=", manager.propagatedCount, ", error=", + manager.errorCount + check manager.outcomes() == expected + +proc validate( + manager: SendEventListenerManager, + expected: set[SendEventOutcome], + expectedRequestId: RequestId, +) = + manager.validate(expected) + for requestId in manager.sentRequestIds: + check requestId == expectedRequestId + for requestId in manager.propagatedRequestIds: + check requestId == expectedRequestId + for requestId in manager.errorRequestIds: + check requestId == expectedRequestId + +proc createApiNodeConf(mode: cli_args.WakuMode = cli_args.WakuMode.Core): WakuNodeConf = + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = mode + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = 1 + conf.reliabilityEnabled = true + conf.rest = false + result = conf + +suite "Waku API - Send": + var + relayNode1 {.threadvar.}: WakuNode + relayNode1PeerInfo 
{.threadvar.}: RemotePeerInfo + relayNode1PeerId {.threadvar.}: PeerId + + relayNode2 {.threadvar.}: WakuNode + relayNode2PeerInfo {.threadvar.}: RemotePeerInfo + relayNode2PeerId {.threadvar.}: PeerId + + lightpushNode {.threadvar.}: WakuNode + lightpushNodePeerInfo {.threadvar.}: RemotePeerInfo + lightpushNodePeerId {.threadvar.}: PeerId + + storeNode {.threadvar.}: WakuNode + storeNodePeerInfo {.threadvar.}: RemotePeerInfo + storeNodePeerId {.threadvar.}: PeerId + + asyncSetup: + lockNewGlobalBrokerContext: + relayNode1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + relayNode1.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert "Failed to mount metadata: " & error + (await relayNode1.mountRelay()).isOkOr: + raiseAssert "Failed to mount relay" + await relayNode1.mountLibp2pPing() + await relayNode1.start() + + lockNewGlobalBrokerContext: + relayNode2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + relayNode2.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert "Failed to mount metadata: " & error + (await relayNode2.mountRelay()).isOkOr: + raiseAssert "Failed to mount relay" + await relayNode2.mountLibp2pPing() + await relayNode2.start() + + lockNewGlobalBrokerContext: + lightpushNode = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + lightpushNode.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert "Failed to mount metadata: " & error + (await lightpushNode.mountRelay()).isOkOr: + raiseAssert "Failed to mount relay" + (await lightpushNode.mountLightPush()).isOkOr: + raiseAssert "Failed to mount lightpush" + await lightpushNode.mountLibp2pPing() + await lightpushNode.start() + + lockNewGlobalBrokerContext: + storeNode = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + storeNode.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert "Failed to mount metadata: " & error + (await storeNode.mountRelay()).isOkOr: + raiseAssert "Failed to mount relay" + # 
Mount archive so store can persist messages + let archiveDriver = newSqliteArchiveDriver() + storeNode.mountArchive(archiveDriver).isOkOr: + raiseAssert "Failed to mount archive: " & error + await storeNode.mountStore() + await storeNode.mountLibp2pPing() + await storeNode.start() + + relayNode1PeerInfo = relayNode1.peerInfo.toRemotePeerInfo() + relayNode1PeerId = relayNode1.peerInfo.peerId + + relayNode2PeerInfo = relayNode2.peerInfo.toRemotePeerInfo() + relayNode2PeerId = relayNode2.peerInfo.peerId + + lightpushNodePeerInfo = lightpushNode.peerInfo.toRemotePeerInfo() + lightpushNodePeerId = lightpushNode.peerInfo.peerId + + storeNodePeerInfo = storeNode.peerInfo.toRemotePeerInfo() + storeNodePeerId = storeNode.peerInfo.peerId + + # Subscribe all relay nodes to the default shard topic + const testPubsubTopic = PubsubTopic("/waku/2/rs/3/0") + proc dummyHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + discard + + relayNode1.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr: + raiseAssert "Failed to subscribe relayNode1: " & error + relayNode2.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr: + raiseAssert "Failed to subscribe relayNode2: " & error + + lightpushNode.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr: + raiseAssert "Failed to subscribe lightpushNode: " & error + storeNode.subscribe((kind: PubsubSub, topic: testPubsubTopic), dummyHandler).isOkOr: + raiseAssert "Failed to subscribe storeNode: " & error + + # Subscribe all relay nodes to the default shard topic + await relayNode1.connectToNodes(@[relayNode2PeerInfo, storeNodePeerInfo]) + await lightpushNode.connectToNodes(@[relayNode2PeerInfo]) + + asyncTeardown: + await allFutures( + relayNode1.stop(), relayNode2.stop(), lightpushNode.stop(), storeNode.stop() + ) + + asyncTest "Check API availability (unhealthy node)": + var node: Waku + lockNewGlobalBrokerContext: + node = (await 
createNode(createApiNodeConf())).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + # node is not connected ! + + let envelope = MessageEnvelope.init( + ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let sendResult = await node.send(envelope) + + # TODO: The API is not enforcing a health check before the send, + # so currently this test cannot successfully fail to send. + check sendResult.isOk() + + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error + + asyncTest "Send fully validated": + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(createApiNodeConf())).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + + await node.node.connectToNodes( + @[relayNode1PeerInfo, lightpushNodePeerInfo, storeNodePeerInfo] + ) + + let eventManager = newSendEventListenerManager(node.brokerCtx) + defer: + eventManager.teardown() + + let envelope = MessageEnvelope.init( + ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let requestId = (await node.send(envelope)).valueOr: + raiseAssert error + + # Wait for events with timeout + const eventTimeout = 10.seconds + discard await eventManager.waitForEvents(eventTimeout) + + eventManager.validate( + {SendEventOutcome.Sent, SendEventOutcome.Propagated}, requestId + ) + + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error + + asyncTest "Send only propagates": + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(createApiNodeConf())).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + + await node.node.connectToNodes(@[relayNode1PeerInfo]) + + let eventManager = newSendEventListenerManager(node.brokerCtx) + defer: + eventManager.teardown() + + let envelope = MessageEnvelope.init( + 
ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let requestId = (await node.send(envelope)).valueOr: + raiseAssert error + + # Wait for events with timeout + const eventTimeout = 10.seconds + discard await eventManager.waitForEvents(eventTimeout) + + eventManager.validate({SendEventOutcome.Propagated}, requestId) + + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error + + asyncTest "Send only propagates fallback to lightpush": + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(createApiNodeConf())).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + + await node.node.connectToNodes(@[lightpushNodePeerInfo]) + + let eventManager = newSendEventListenerManager(node.brokerCtx) + defer: + eventManager.teardown() + + let envelope = MessageEnvelope.init( + ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let requestId = (await node.send(envelope)).valueOr: + raiseAssert error + + # Wait for events with timeout + const eventTimeout = 10.seconds + discard await eventManager.waitForEvents(eventTimeout) + + eventManager.validate({SendEventOutcome.Propagated}, requestId) + + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error + + asyncTest "Send fully validates fallback to lightpush": + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(createApiNodeConf())).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + + await node.node.connectToNodes(@[lightpushNodePeerInfo, storeNodePeerInfo]) + + let eventManager = newSendEventListenerManager(node.brokerCtx) + defer: + eventManager.teardown() + + let envelope = MessageEnvelope.init( + ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let requestId = (await node.send(envelope)).valueOr: + raiseAssert error + + # Wait for events with timeout + const 
eventTimeout = 10.seconds + discard await eventManager.waitForEvents(eventTimeout) + + eventManager.validate( + {SendEventOutcome.Propagated, SendEventOutcome.Sent}, requestId + ) + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error + + asyncTest "Send fails with event": + var fakeLightpushNode: WakuNode + lockNewGlobalBrokerContext: + fakeLightpushNode = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + fakeLightpushNode.mountMetadata(3, @[0'u16]).isOkOr: + raiseAssert "Failed to mount metadata: " & error + (await fakeLightpushNode.mountRelay()).isOkOr: + raiseAssert "Failed to mount relay" + (await fakeLightpushNode.mountLightPush()).isOkOr: + raiseAssert "Failed to mount lightpush" + await fakeLightpushNode.mountLibp2pPing() + await fakeLightpushNode.start() + let fakeLightpushNodePeerInfo = fakeLightpushNode.peerInfo.toRemotePeerInfo() + proc dummyHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + discard + + fakeLightpushNode.subscribe( + (kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/3/0")), dummyHandler + ).isOkOr: + raiseAssert "Failed to subscribe fakeLightpushNode: " & error + + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(createApiNodeConf(cli_args.WakuMode.Edge))).valueOr: + raiseAssert error + (await startWaku(addr node)).isOkOr: + raiseAssert "Failed to start Waku node: " & error + + await node.node.connectToNodes(@[fakeLightpushNodePeerInfo]) + + let eventManager = newSendEventListenerManager(node.brokerCtx) + defer: + eventManager.teardown() + + let envelope = MessageEnvelope.init( + ContentTopic("/waku/2/default-content/proto"), "test payload" + ) + + let requestId = (await node.send(envelope)).valueOr: + raiseAssert error + + echo "Sent message with requestId=", requestId + # Wait for events with timeout + const eventTimeout = 62.seconds + discard await eventManager.waitForEvents(eventTimeout) + + 
eventManager.validate({SendEventOutcome.Error}, requestId) + (await node.stop()).isOkOr: + raiseAssert "Failed to stop node: " & error diff --git a/tests/api/test_api_subscription.nim b/tests/api/test_api_subscription.nim new file mode 100644 index 000000000..6639e3dea --- /dev/null +++ b/tests/api/test_api_subscription.nim @@ -0,0 +1,400 @@ +{.used.} + +import std/[strutils, net, options, sets] +import chronos, testutils/unittests, stew/byteutils +import libp2p/[peerid, peerinfo, multiaddress, crypto/crypto] +import ../testlib/[common, wakucore, wakunode, testasync] + +import + waku, + waku/[ + waku_node, + waku_core, + common/broker/broker_context, + events/message_events, + waku_relay/protocol, + ] +import waku/factory/waku_conf +import tools/confutils/cli_args + +# TODO: Edge testing (after MAPI edge support is completed) + +const TestTimeout = chronos.seconds(10) +const NegativeTestTimeout = chronos.seconds(2) + +type ReceiveEventListenerManager = ref object + brokerCtx: BrokerContext + receivedListener: MessageReceivedEventListener + receivedEvent: AsyncEvent + receivedMessages: seq[WakuMessage] + targetCount: int + +proc newReceiveEventListenerManager( + brokerCtx: BrokerContext, expectedCount: int = 1 +): ReceiveEventListenerManager = + let manager = ReceiveEventListenerManager( + brokerCtx: brokerCtx, receivedMessages: @[], targetCount: expectedCount + ) + manager.receivedEvent = newAsyncEvent() + + manager.receivedListener = MessageReceivedEvent + .listen( + brokerCtx, + proc(event: MessageReceivedEvent) {.async: (raises: []).} = + manager.receivedMessages.add(event.message) + + if manager.receivedMessages.len >= manager.targetCount: + manager.receivedEvent.fire() + , + ) + .expect("Failed to listen to MessageReceivedEvent") + + return manager + +proc teardown(manager: ReceiveEventListenerManager) = + MessageReceivedEvent.dropListener(manager.brokerCtx, manager.receivedListener) + +proc waitForEvents( + manager: ReceiveEventListenerManager, timeout: 
Duration +): Future[bool] {.async.} = + return await manager.receivedEvent.wait().withTimeout(timeout) + +type TestNetwork = ref object + publisher: WakuNode + subscriber: Waku + publisherPeerInfo: RemotePeerInfo + +proc createApiNodeConf( + mode: cli_args.WakuMode = cli_args.WakuMode.Core, numShards: uint16 = 1 +): WakuNodeConf = + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = mode + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = numShards + conf.reliabilityEnabled = true + conf.rest = false + result = conf + +proc setupSubscriberNode(conf: WakuNodeConf): Future[Waku] {.async.} = + var node: Waku + lockNewGlobalBrokerContext: + node = (await createNode(conf)).expect("Failed to create subscriber node") + (await startWaku(addr node)).expect("Failed to start subscriber node") + return node + +proc setupNetwork( + numShards: uint16 = 1, mode: cli_args.WakuMode = cli_args.WakuMode.Core +): Future[TestNetwork] {.async.} = + var net = TestNetwork() + + lockNewGlobalBrokerContext: + net.publisher = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + net.publisher.mountMetadata(3, @[0'u16]).expect("Failed to mount metadata") + (await net.publisher.mountRelay()).expect("Failed to mount relay") + await net.publisher.mountLibp2pPing() + await net.publisher.start() + + net.publisherPeerInfo = net.publisher.peerInfo.toRemotePeerInfo() + + proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + discard + + # Subscribe the publisher to all shards to guarantee a GossipSub mesh with the subscriber. + # Currently, Core/Relay nodes auto-subscribe to all network shards on boot, but if + # that changes, this will be needed to cause the publisher to have shard interest + # for any shards the subscriber may want to use, which is required for waitForMesh to work. 
+ for i in 0 ..< numShards.int: + let shard = PubsubTopic("/waku/2/rs/3/" & $i) + net.publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect( + "Failed to sub publisher" + ) + + net.subscriber = await setupSubscriberNode(createApiNodeConf(mode, numShards)) + + await net.subscriber.node.connectToNodes(@[net.publisherPeerInfo]) + + return net + +proc teardown(net: TestNetwork) {.async.} = + if not isNil(net.subscriber): + (await net.subscriber.stop()).expect("Failed to stop subscriber node") + net.subscriber = nil + + if not isNil(net.publisher): + await net.publisher.stop() + net.publisher = nil + +proc getRelayShard(node: WakuNode, contentTopic: ContentTopic): PubsubTopic = + let autoSharding = node.wakuAutoSharding.get() + let shardObj = autoSharding.getShard(contentTopic).expect("Failed to get shard") + return PubsubTopic($shardObj) + +proc waitForMesh(node: WakuNode, shard: PubsubTopic) {.async.} = + for _ in 0 ..< 50: + if node.wakuRelay.getNumPeersInMesh(shard).valueOr(0) > 0: + return + await sleepAsync(100.milliseconds) + raise newException(ValueError, "GossipSub Mesh failed to stabilize on " & shard) + +proc publishToMesh( + net: TestNetwork, contentTopic: ContentTopic, payload: seq[byte] +): Future[Result[int, string]] {.async.} = + let shard = net.subscriber.node.getRelayShard(contentTopic) + + await waitForMesh(net.publisher, shard) + + let msg = WakuMessage( + payload: payload, contentTopic: contentTopic, version: 0, timestamp: now() + ) + return await net.publisher.publish(some(shard), msg) + +suite "Messaging API, SubscriptionManager": + asyncTest "Subscription API, relay node auto subscribe and receive message": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let testTopic = ContentTopic("/waku/2/test-content/proto") + (await net.subscriber.subscribe(testTopic)).expect( + "subscriberNode failed to subscribe" + ) + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + defer: + 
eventManager.teardown() + + discard (await net.publishToMesh(testTopic, "Hello, world!".toBytes())).expect( + "Publish failed" + ) + + require await eventManager.waitForEvents(TestTimeout) + require eventManager.receivedMessages.len == 1 + check eventManager.receivedMessages[0].contentTopic == testTopic + + asyncTest "Subscription API, relay node ignores unsubscribed content topics on same shard": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let subbedTopic = ContentTopic("/waku/2/subbed-topic/proto") + let ignoredTopic = ContentTopic("/waku/2/ignored-topic/proto") + (await net.subscriber.subscribe(subbedTopic)).expect("failed to subscribe") + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + defer: + eventManager.teardown() + + discard (await net.publishToMesh(ignoredTopic, "Ghost Msg".toBytes())).expect( + "Publish failed" + ) + + check not await eventManager.waitForEvents(NegativeTestTimeout) + check eventManager.receivedMessages.len == 0 + + asyncTest "Subscription API, relay node unsubscribe stops message receipt": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let testTopic = ContentTopic("/waku/2/unsub-test/proto") + + (await net.subscriber.subscribe(testTopic)).expect("failed to subscribe") + net.subscriber.unsubscribe(testTopic).expect("failed to unsubscribe") + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + defer: + eventManager.teardown() + + discard (await net.publishToMesh(testTopic, "Should be dropped".toBytes())).expect( + "Publish failed" + ) + + check not await eventManager.waitForEvents(NegativeTestTimeout) + check eventManager.receivedMessages.len == 0 + + asyncTest "Subscription API, overlapping topics on same shard maintain correct isolation": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let topicA = ContentTopic("/waku/2/topic-a/proto") + let topicB = ContentTopic("/waku/2/topic-b/proto") + (await 
net.subscriber.subscribe(topicA)).expect("failed to sub A") + (await net.subscriber.subscribe(topicB)).expect("failed to sub B") + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + defer: + eventManager.teardown() + + net.subscriber.unsubscribe(topicA).expect("failed to unsub A") + + discard (await net.publishToMesh(topicA, "Dropped Message".toBytes())).expect( + "Publish A failed" + ) + discard + (await net.publishToMesh(topicB, "Kept Msg".toBytes())).expect("Publish B failed") + + require await eventManager.waitForEvents(TestTimeout) + require eventManager.receivedMessages.len == 1 + check eventManager.receivedMessages[0].contentTopic == topicB + + asyncTest "Subscription API, redundant subs tolerated and subs are removed": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let glitchTopic = ContentTopic("/waku/2/glitch/proto") + + (await net.subscriber.subscribe(glitchTopic)).expect("failed to sub") + (await net.subscriber.subscribe(glitchTopic)).expect("failed to double sub") + net.subscriber.unsubscribe(glitchTopic).expect("failed to unsub") + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + defer: + eventManager.teardown() + + discard (await net.publishToMesh(glitchTopic, "Ghost Msg".toBytes())).expect( + "Publish failed" + ) + + check not await eventManager.waitForEvents(NegativeTestTimeout) + check eventManager.receivedMessages.len == 0 + + asyncTest "Subscription API, resubscribe to an unsubscribed topic": + let net = await setupNetwork(1) + defer: + await net.teardown() + + let testTopic = ContentTopic("/waku/2/resub-test/proto") + + # Subscribe + (await net.subscriber.subscribe(testTopic)).expect("Initial sub failed") + + var eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + discard + (await net.publishToMesh(testTopic, "Msg 1".toBytes())).expect("Pub 1 failed") + + require await eventManager.waitForEvents(TestTimeout) + 
eventManager.teardown() + + # Unsubscribe and verify teardown + net.subscriber.unsubscribe(testTopic).expect("Unsub failed") + eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + + discard + (await net.publishToMesh(testTopic, "Ghost".toBytes())).expect("Ghost pub failed") + + check not await eventManager.waitForEvents(NegativeTestTimeout) + eventManager.teardown() + + # Resubscribe + (await net.subscriber.subscribe(testTopic)).expect("Resub failed") + eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1) + + discard + (await net.publishToMesh(testTopic, "Msg 2".toBytes())).expect("Pub 2 failed") + + require await eventManager.waitForEvents(TestTimeout) + check eventManager.receivedMessages[0].payload == "Msg 2".toBytes() + + asyncTest "Subscription API, two content topics in different shards": + let net = await setupNetwork(8) + defer: + await net.teardown() + + var topicA = ContentTopic("/appA/2/shard-test-a/proto") + var topicB = ContentTopic("/appB/2/shard-test-b/proto") + + # generate two content topics that land in two different shards + var i = 0 + while net.subscriber.node.getRelayShard(topicA) == + net.subscriber.node.getRelayShard(topicB): + topicB = ContentTopic("/appB" & $i & "/2/shard-test-b/proto") + inc i + + (await net.subscriber.subscribe(topicA)).expect("failed to sub A") + (await net.subscriber.subscribe(topicB)).expect("failed to sub B") + + let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 2) + defer: + eventManager.teardown() + + discard (await net.publishToMesh(topicA, "Msg on Shard A".toBytes())).expect( + "Publish A failed" + ) + discard (await net.publishToMesh(topicB, "Msg on Shard B".toBytes())).expect( + "Publish B failed" + ) + + require await eventManager.waitForEvents(TestTimeout) + require eventManager.receivedMessages.len == 2 + + asyncTest "Subscription API, many content topics in many shards": + let net = await setupNetwork(8) + defer: + await net.teardown() 
+ + var allTopics: seq[ContentTopic] + for i in 0 ..< 100: + allTopics.add(ContentTopic("/stress-app-" & $i & "/2/state-test/proto")) + + var activeSubs: seq[ContentTopic] + + proc verifyNetworkState(expected: seq[ContentTopic]) {.async.} = + let eventManager = + newReceiveEventListenerManager(net.subscriber.brokerCtx, expected.len) + + for topic in allTopics: + discard (await net.publishToMesh(topic, "Stress Payload".toBytes())).expect( + "publish failed" + ) + + require await eventManager.waitForEvents(TestTimeout) + + # here we just give a chance for any messages that we don't expect to arrive + await sleepAsync(1.seconds) + eventManager.teardown() + + # weak check (but catches most bugs) + require eventManager.receivedMessages.len == expected.len + + # strict expected receipt test + var receivedTopics = initHashSet[ContentTopic]() + for msg in eventManager.receivedMessages: + receivedTopics.incl(msg.contentTopic) + var expectedTopics = initHashSet[ContentTopic]() + for t in expected: + expectedTopics.incl(t) + + check receivedTopics == expectedTopics + + # subscribe to all content topics we generated + for t in allTopics: + (await net.subscriber.subscribe(t)).expect("sub failed") + activeSubs.add(t) + + await verifyNetworkState(activeSubs) + + # unsubscribe from some content topics + for i in 0 ..< 50: + let t = allTopics[i] + net.subscriber.unsubscribe(t).expect("unsub failed") + + let idx = activeSubs.find(t) + if idx >= 0: + activeSubs.del(idx) + + await verifyNetworkState(activeSubs) + + # re-subscribe to some content topics + for i in 0 ..< 25: + let t = allTopics[i] + (await net.subscriber.subscribe(t)).expect("resub failed") + activeSubs.add(t) + + await verifyNetworkState(activeSubs) diff --git a/tests/api/test_entry_nodes.nim b/tests/api/test_entry_nodes.nim index 136a49b2b..38dc38ba4 100644 --- a/tests/api/test_entry_nodes.nim +++ b/tests/api/test_entry_nodes.nim @@ -2,7 +2,7 @@ import std/options, results, testutils/unittests -import 
waku/api/entry_nodes +import tools/confutils/entry_nodes # Since classifyEntryNode is internal, we test it indirectly through processEntryNodes behavior # The enum is exported so we can test against it diff --git a/tests/api/test_node_conf.nim b/tests/api/test_node_conf.nim index 232ffc7d2..d0b3d433c 100644 --- a/tests/api/test_node_conf.nim +++ b/tests/api/test_node_conf.nim @@ -1,17 +1,337 @@ {.used.} -import std/options, results, stint, testutils/unittests -import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config +import std/[options, json, strutils], results, stint, testutils/unittests +import json_serialization +import confutils, confutils/std/net +import tools/confutils/cli_args +import waku/factory/waku_conf, waku/factory/networks_config +import waku/common/logging -suite "LibWaku Conf - toWakuConf": - test "Minimal configuration": +# Helper: parse JSON into WakuNodeConf using fieldPairs (same as liblogosdelivery) +proc parseWakuNodeConfFromJson(jsonStr: string): Result[WakuNodeConf, string] = + var conf = defaultWakuNodeConf().valueOr: + return err(error) + var jsonNode: JsonNode + try: + jsonNode = parseJson(jsonStr) + except Exception: + return err("JSON parse error: " & getCurrentExceptionMsg()) + for confField, confValue in fieldPairs(conf): + if jsonNode.contains(confField): + let formattedString = ($jsonNode[confField]).strip(chars = {'\"'}) + try: + confValue = parseCmdArg(typeof(confValue), formattedString) + except Exception: + return err( + "Field '" & confField & "' parse error: " & getCurrentExceptionMsg() & + ". 
Value: " & formattedString + ) + return ok(conf) + +suite "WakuNodeConf - mode-driven toWakuConf": + test "Core mode enables service protocols": ## Given - let nodeConfig = NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.clusterId = 1 ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true + wakuConf.lightPush == true + wakuConf.peerExchangeService == true + wakuConf.rendezvous == true + wakuConf.clusterId == 1 + + test "Edge mode disables service protocols": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Edge + conf.clusterId = 1 + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == false + wakuConf.lightPush == false + wakuConf.filterServiceConf.isSome() == false + wakuConf.storeServiceConf.isSome() == false + wakuConf.peerExchangeService == true + + test "noMode uses explicit CLI flags as-is": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = WakuMode.noMode + conf.relay = true + conf.lightpush = false + conf.clusterId = 5 + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true + wakuConf.lightPush == false + wakuConf.clusterId == 5 + + test "Core mode overrides individual protocol flags": + ## Given - user sets relay=false but mode=Core should override + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.relay = false # will be overridden by Core mode + + ## When + let wakuConfRes = conf.toWakuConf() 
+ + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true # mode overrides + +suite "WakuNodeConf - JSON parsing with fieldPairs": + test "Empty JSON produces valid default conf": + ## Given / When + let confRes = parseWakuNodeConfFromJson("{}") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == WakuMode.noMode + conf.clusterId == 0 + conf.logLevel == logging.LogLevel.INFO + + test "JSON with mode and clusterId": + ## Given / When + let confRes = parseWakuNodeConfFromJson("""{"mode": "Core", "clusterId": 42}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == Core + conf.clusterId == 42 + + test "JSON with Edge mode": + ## Given / When + let confRes = parseWakuNodeConfFromJson("""{"mode": "Edge"}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == Edge + + test "JSON with logLevel": + ## Given / When + let confRes = parseWakuNodeConfFromJson("""{"logLevel": "DEBUG"}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.logLevel == logging.LogLevel.DEBUG + + test "JSON with sharding config": + ## Given / When + let confRes = + parseWakuNodeConfFromJson("""{"clusterId": 99, "numShardsInNetwork": 16}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.clusterId == 99 + conf.numShardsInNetwork == 16 + + test "JSON with unknown fields is silently ignored": + ## Given / When + let confRes = + parseWakuNodeConfFromJson("""{"unknownField": true, "clusterId": 5}""") + + ## Then - unknown fields are just ignored (not in fieldPairs) + require confRes.isOk() + let conf = confRes.get() + check: + conf.clusterId == 5 + + test "Invalid JSON syntax returns error": + ## Given / When + let confRes = parseWakuNodeConfFromJson("{ not valid json }") + + ## Then + check confRes.isErr() + +suite "WakuNodeConf - preset 
integration": + test "TWN preset applies TheWakuNetworkConf": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.preset = "twn" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 1 + + test "LogosDev preset applies LogosDevConf": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.preset = "logosdev" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 2 + + test "Invalid preset returns error": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.preset = "nonexistent" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + check wakuConfRes.isErr() + +suite "WakuNodeConf JSON -> WakuConf integration": + test "Core mode JSON config produces valid WakuConf": + ## Given + let confRes = parseWakuNodeConfFromJson( + """{"mode": "Core", "clusterId": 55, "numShardsInNetwork": 6}""" + ) + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true + wakuConf.lightPush == true + wakuConf.peerExchangeService == true + wakuConf.clusterId == 55 + wakuConf.shardingConf.numShardsInCluster == 6 + + test "Edge mode JSON config produces valid WakuConf": + ## Given + let confRes = parseWakuNodeConfFromJson("""{"mode": "Edge", "clusterId": 1}""") + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == false + wakuConf.lightPush == false 
+ wakuConf.peerExchangeService == true + + test "JSON with preset produces valid WakuConf": + ## Given + let confRes = + parseWakuNodeConfFromJson("""{"mode": "Core", "preset": "logosdev"}""") + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 2 + wakuConf.relay == true + + test "JSON with static nodes": + ## Given + let confRes = parseWakuNodeConfFromJson( + """{"mode": "Core", "clusterId": 42, "staticnodes": ["/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"]}""" + ) + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.staticNodes.len == 1 + + test "JSON with max message size": + ## Given + let confRes = + parseWakuNodeConfFromJson("""{"clusterId": 42, "maxMessageSize": "100KiB"}""") + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.maxMessageSizeBytes == 100'u64 * 1024'u64 + +# ---- Deprecated NodeConfig tests (kept for backward compatibility) ---- + +{.push warning[Deprecated]: off.} + +import waku/api/api_conf + +suite "NodeConfig (deprecated) - toWakuConf": + test "Minimal configuration": + let nodeConfig = NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) let wakuConf = wakuConfRes.valueOr: raiseAssert error wakuConf.validate().isOkOr: @@ -21,16 +341,24 @@ suite "LibWaku Conf - toWakuConf": wakuConf.shardingConf.numShardsInCluster == 8 wakuConf.staticNodes.len == 0 - test "Core mode configuration": - ## Given + 
test "Edge mode configuration": let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) + let nodeConfig = + NodeConfig.init(mode = api_conf.WakuMode.Edge, protocolsConfig = protocolsConfig) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == false + wakuConf.lightPush == false + wakuConf.peerExchangeService == true - let nodeConfig = NodeConfig.init(mode = Core, protocolsConfig = protocolsConfig) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then + test "Core mode configuration": + let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) + let nodeConfig = + NodeConfig.init(mode = api_conf.WakuMode.Core, protocolsConfig = protocolsConfig) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) require wakuConfRes.isOk() let wakuConf = wakuConfRes.get() require wakuConf.validate().isOk() @@ -38,242 +366,5 @@ suite "LibWaku Conf - toWakuConf": wakuConf.relay == true wakuConf.lightPush == true wakuConf.peerExchangeService == true - wakuConf.clusterId == 1 - test "Auto-sharding configuration": - ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - staticStoreNodes = @[], - clusterId = 42, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), - ), - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - check: - wakuConf.clusterId == 42 - wakuConf.shardingConf.numShardsInCluster == 16 - - test "Bootstrap nodes configuration": - ## Given - let entryNodes = - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g", - 
"enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g", - ] - let libConf = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1 - ), - ) - - ## When - let wakuConfRes = toWakuConf(libConf) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - require wakuConf.discv5Conf.isSome() - check: - wakuConf.discv5Conf.get().bootstrapNodes == entryNodes - - test "Static store nodes configuration": - ## Given - let staticStoreNodes = - @[ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - "/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd", - ] - let nodeConf = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], staticStoreNodes = staticStoreNodes, clusterId = 1 - ) - ) - - ## When - let wakuConfRes = toWakuConf(nodeConf) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - check: - wakuConf.staticNodes == staticStoreNodes - - test "Message validation with max message size": - ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - staticStoreNodes = @[], - clusterId = 1, - messageValidation = - MessageValidation(maxMessageSize: "100KiB", rlnConfig: none(RlnConfig)), - ) - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - check: - wakuConf.maxMessageSizeBytes == 100'u64 * 1024'u64 - - test "Message validation with RLN config": - ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1, - messageValidation 
= MessageValidation( - maxMessageSize: "150 KiB", - rlnConfig: some( - RlnConfig( - contractAddress: "0x1234567890123456789012345678901234567890", - chainId: 1'u, - epochSizeSec: 600'u64, - ) - ), - ), - ), - ethRpcEndpoints = @["http://127.0.0.1:1111"], - ) - - ## When - let wakuConf = toWakuConf(nodeConfig).valueOr: - raiseAssert error - - wakuConf.validate().isOkOr: - raiseAssert error - - check: - wakuConf.maxMessageSizeBytes == 150'u64 * 1024'u64 - - require wakuConf.rlnRelayConf.isSome() - let rlnConf = wakuConf.rlnRelayConf.get() - check: - rlnConf.dynamic == true - rlnConf.ethContractAddress == "0x1234567890123456789012345678901234567890" - rlnConf.chainId == 1'u256 - rlnConf.epochSizeSec == 600'u64 - - test "Full Core mode configuration with all fields": - ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" - ], - staticStoreNodes = - @[ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - clusterId = 99, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 12), - messageValidation = MessageValidation( - maxMessageSize: "512KiB", - rlnConfig: some( - RlnConfig( - contractAddress: "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - chainId: 5'u, # Goerli - epochSizeSec: 300'u64, - ) - ), - ), - ), - ethRpcEndpoints = @["https://127.0.0.1:8333"], - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - let wakuConf = wakuConfRes.valueOr: - raiseAssert error - wakuConf.validate().isOkOr: - raiseAssert error - - # Check basic settings - check: - wakuConf.relay == true - wakuConf.lightPush == true - wakuConf.peerExchangeService == true - wakuConf.rendezvous == true - wakuConf.clusterId == 99 - - # Check sharding - check: - 
wakuConf.shardingConf.numShardsInCluster == 12 - - # Check bootstrap nodes - require wakuConf.discv5Conf.isSome() - check: - wakuConf.discv5Conf.get().bootstrapNodes.len == 1 - - # Check static nodes - check: - wakuConf.staticNodes.len == 1 - wakuConf.staticNodes[0] == - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - - # Check message validation - check: - wakuConf.maxMessageSizeBytes == 512'u64 * 1024'u64 - - # Check RLN config - require wakuConf.rlnRelayConf.isSome() - let rlnConf = wakuConf.rlnRelayConf.get() - check: - rlnConf.dynamic == true - rlnConf.ethContractAddress == "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" - rlnConf.chainId == 5'u256 - rlnConf.epochSizeSec == 300'u64 - - test "NodeConfig with mixed entry nodes (integration test)": - ## Given - let entryNodes = - @[ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - ] - - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1 - ), - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - - # Check that ENRTree went to DNS discovery - require wakuConf.dnsDiscoveryConf.isSome() - check: - wakuConf.dnsDiscoveryConf.get().enrTreeUrl == entryNodes[0] - - # Check that multiaddr went to static nodes - check: - wakuConf.staticNodes.len == 1 - wakuConf.staticNodes[0] == entryNodes[1] +{.pop.} diff --git a/tests/common/test_all.nim b/tests/common/test_all.nim index 5b4515093..d597a7424 100644 --- a/tests/common/test_all.nim +++ b/tests/common/test_all.nim @@ -6,7 +6,9 @@ import ./test_protobuf_validation, ./test_sqlite_migrations, ./test_parse_size, - ./test_tokenbucket, ./test_requestratelimiter, ./test_ratelimit_setting, 
- ./test_timed_map + ./test_timed_map, + ./test_event_broker, + ./test_request_broker, + ./test_multi_request_broker diff --git a/tests/common/test_event_broker.nim b/tests/common/test_event_broker.nim new file mode 100644 index 000000000..bcd081f4f --- /dev/null +++ b/tests/common/test_event_broker.nim @@ -0,0 +1,201 @@ +import chronos +import std/sequtils +import testutils/unittests + +import waku/common/broker/event_broker + +type ExternalDefinedEventType = object + label*: string + +EventBroker: + type IntEvent = int + +EventBroker: + type ExternalAliasEvent = distinct ExternalDefinedEventType + +EventBroker: + type SampleEvent = object + value*: int + label*: string + +EventBroker: + type BinaryEvent = object + flag*: bool + +EventBroker: + type RefEvent = ref object + payload*: seq[int] + +template waitForListeners() = + waitFor sleepAsync(1.milliseconds) + +suite "EventBroker": + test "delivers events to all listeners": + var seen: seq[(int, string)] = @[] + + discard SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + seen.add((evt.value, evt.label)) + ) + + discard SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + seen.add((evt.value * 2, evt.label & "!")) + ) + + let evt = SampleEvent(value: 5, label: "hi") + SampleEvent.emit(evt) + waitForListeners() + + check seen.len == 2 + check seen.anyIt(it == (5, "hi")) + check seen.anyIt(it == (10, "hi!")) + + SampleEvent.dropAllListeners() + + test "forget removes a single listener": + var counter = 0 + + let handleA = SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + inc counter + ) + + let handleB = SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + inc(counter, 2) + ) + + SampleEvent.dropListener(handleA.get()) + let eventVal = SampleEvent(value: 1, label: "one") + SampleEvent.emit(eventVal) + waitForListeners() + check counter == 2 + + SampleEvent.dropAllListeners() 
+ + test "forgetAll clears every listener": + var triggered = false + + let handle1 = SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + triggered = true + ) + let handle2 = SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + discard + ) + + SampleEvent.dropAllListeners() + SampleEvent.emit(42, "noop") + SampleEvent.emit(label = "noop", value = 42) + waitForListeners() + check not triggered + + let freshHandle = SampleEvent.listen( + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + discard + ) + check freshHandle.get().id > 0'u64 + SampleEvent.dropListener(freshHandle.get()) + + test "broker helpers operate via typedesc": + var toggles: seq[bool] = @[] + + let handle = BinaryEvent.listen( + proc(evt: BinaryEvent): Future[void] {.async: (raises: []).} = + toggles.add(evt.flag) + ) + + BinaryEvent(flag: true).emit() + waitForListeners() + let binaryEvent = BinaryEvent(flag: false) + BinaryEvent.emit(binaryEvent) + waitForListeners() + + check toggles == @[true, false] + BinaryEvent.dropAllListeners() + + test "ref typed event": + var counter: int = 0 + + let handle = RefEvent.listen( + proc(evt: RefEvent): Future[void] {.async: (raises: []).} = + for n in evt.payload: + counter += n + ) + + RefEvent(payload: @[1, 2, 3]).emit() + waitForListeners() + RefEvent.emit(payload = @[4, 5, 6]) + waitForListeners() + + check counter == 21 # 1+2+3 + 4+5+6 + RefEvent.dropAllListeners() + + test "supports BrokerContext-scoped listeners": + SampleEvent.dropAllListeners() + + let ctxA = NewBrokerContext() + let ctxB = NewBrokerContext() + + var seenA: seq[int] = @[] + var seenB: seq[int] = @[] + + discard SampleEvent.listen( + ctxA, + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + seenA.add(evt.value), + ) + + discard SampleEvent.listen( + ctxB, + proc(evt: SampleEvent): Future[void] {.async: (raises: []).} = + seenB.add(evt.value), + ) + + SampleEvent.emit(ctxA, 
SampleEvent(value: 1, label: "a")) + SampleEvent.emit(ctxB, SampleEvent(value: 2, label: "b")) + waitForListeners() + + check seenA == @[1] + check seenB == @[2] + + SampleEvent.dropAllListeners(ctxA) + SampleEvent.emit(ctxA, SampleEvent(value: 3, label: "a2")) + SampleEvent.emit(ctxB, SampleEvent(value: 4, label: "b2")) + waitForListeners() + + check seenA == @[1] + check seenB == @[2, 4] + + SampleEvent.dropAllListeners(ctxB) + + test "supports non-object event types (auto-distinct)": + var seen: seq[int] = @[] + + discard IntEvent.listen( + proc(evt: IntEvent): Future[void] {.async: (raises: []).} = + seen.add(int(evt)) + ) + + IntEvent.emit(IntEvent(42)) + waitForListeners() + + check seen == @[42] + IntEvent.dropAllListeners() + + test "supports externally-defined type aliases (auto-distinct)": + var seen: seq[string] = @[] + + discard ExternalAliasEvent.listen( + proc(evt: ExternalAliasEvent): Future[void] {.async: (raises: []).} = + let base = ExternalDefinedEventType(evt) + seen.add(base.label) + ) + + ExternalAliasEvent.emit(ExternalAliasEvent(ExternalDefinedEventType(label: "x"))) + waitForListeners() + + check seen == @["x"] + ExternalAliasEvent.dropAllListeners() diff --git a/tests/common/test_multi_request_broker.nim b/tests/common/test_multi_request_broker.nim new file mode 100644 index 000000000..39ed90eea --- /dev/null +++ b/tests/common/test_multi_request_broker.nim @@ -0,0 +1,343 @@ +{.used.} + +import testutils/unittests +import chronos +import std/sequtils +import std/strutils + +import waku/common/broker/multi_request_broker + +MultiRequestBroker: + type NoArgResponse = object + label*: string + + proc signatureFetch*(): Future[Result[NoArgResponse, string]] {.async.} + +MultiRequestBroker: + type ArgResponse = object + id*: string + + proc signatureFetch*( + suffix: string, numsuffix: int + ): Future[Result[ArgResponse, string]] {.async.} + +MultiRequestBroker: + type DualResponse = ref object + note*: string + suffix*: string + + proc 
signatureBase*(): Future[Result[DualResponse, string]] {.async.} + proc signatureWithInput*( + suffix: string + ): Future[Result[DualResponse, string]] {.async.} + +type ExternalBaseType = string + +MultiRequestBroker: + type NativeIntResponse = int + + proc signatureFetch*(): Future[Result[NativeIntResponse, string]] {.async.} + +MultiRequestBroker: + type ExternalAliasResponse = ExternalBaseType + + proc signatureFetch*(): Future[Result[ExternalAliasResponse, string]] {.async.} + +MultiRequestBroker: + type AlreadyDistinctResponse = distinct int + + proc signatureFetch*(): Future[Result[AlreadyDistinctResponse, string]] {.async.} + +suite "MultiRequestBroker": + test "aggregates zero-argument providers": + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + ok(NoArgResponse(label: "one")) + ) + + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + discard catch: + await sleepAsync(1.milliseconds) + ok(NoArgResponse(label: "two")) + ) + + let responses = waitFor NoArgResponse.request() + check responses.get().len == 2 + check responses.get().anyIt(it.label == "one") + check responses.get().anyIt(it.label == "two") + + NoArgResponse.clearProviders() + + test "aggregates argument providers": + discard ArgResponse.setProvider( + proc(suffix: string, num: int): Future[Result[ArgResponse, string]] {.async.} = + ok(ArgResponse(id: suffix & "-a-" & $num)) + ) + + discard ArgResponse.setProvider( + proc(suffix: string, num: int): Future[Result[ArgResponse, string]] {.async.} = + ok(ArgResponse(id: suffix & "-b-" & $num)) + ) + + let keyed = waitFor ArgResponse.request("topic", 1) + check keyed.get().len == 2 + check keyed.get().anyIt(it.id == "topic-a-1") + check keyed.get().anyIt(it.id == "topic-b-1") + + ArgResponse.clearProviders() + + test "clearProviders resets both provider lists": + discard DualResponse.setProvider( + proc(): Future[Result[DualResponse, string]] {.async.} = + 
ok(DualResponse(note: "base", suffix: "")) + ) + + discard DualResponse.setProvider( + proc(suffix: string): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "base" & suffix, suffix: suffix)) + ) + + let noArgs = waitFor DualResponse.request() + check noArgs.get().len == 1 + + let param = waitFor DualResponse.request("-extra") + check param.get().len == 1 + check param.get()[0].suffix == "-extra" + + DualResponse.clearProviders() + + let emptyNoArgs = waitFor DualResponse.request() + check emptyNoArgs.get().len == 0 + + let emptyWithArgs = waitFor DualResponse.request("-extra") + check emptyWithArgs.get().len == 0 + + test "request returns empty seq when no providers registered": + let empty = waitFor NoArgResponse.request() + check empty.get().len == 0 + + test "failed providers will fail the request": + NoArgResponse.clearProviders() + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + err("boom") + ) + + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + ok(NoArgResponse(label: "survivor")) + ) + + let filtered = waitFor NoArgResponse.request() + check filtered.isErr() + + NoArgResponse.clearProviders() + + test "deduplicates identical zero-argument providers": + NoArgResponse.clearProviders() + var invocations = 0 + let sharedHandler = proc(): Future[Result[NoArgResponse, string]] {.async.} = + inc invocations + ok(NoArgResponse(label: "dup")) + + let first = NoArgResponse.setProvider(sharedHandler) + let second = NoArgResponse.setProvider(sharedHandler) + + check first.get().id == second.get().id + check first.get().kind == second.get().kind + + let dupResponses = waitFor NoArgResponse.request() + check dupResponses.get().len == 1 + check invocations == 1 + + NoArgResponse.clearProviders() + + test "removeProvider deletes registered handlers": + var removedCalled = false + var keptCalled = false + + let removable = NoArgResponse.setProvider( + 
proc(): Future[Result[NoArgResponse, string]] {.async.} = + removedCalled = true + ok(NoArgResponse(label: "removed")) + ) + + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + keptCalled = true + ok(NoArgResponse(label: "kept")) + ) + + NoArgResponse.removeProvider(removable.get()) + + let afterRemoval = (waitFor NoArgResponse.request()).valueOr: + assert false, "request failed" + @[] + check afterRemoval.len == 1 + check afterRemoval[0].label == "kept" + check not removedCalled + check keptCalled + + NoArgResponse.clearProviders() + + test "removeProvider works for argument signatures": + var invoked: seq[string] = @[] + + discard ArgResponse.setProvider( + proc(suffix: string, num: int): Future[Result[ArgResponse, string]] {.async.} = + invoked.add("first" & suffix) + ok(ArgResponse(id: suffix & "-one-" & $num)) + ) + + let handle = ArgResponse.setProvider( + proc(suffix: string, num: int): Future[Result[ArgResponse, string]] {.async.} = + invoked.add("second" & suffix) + ok(ArgResponse(id: suffix & "-two-" & $num)) + ) + + ArgResponse.removeProvider(handle.get()) + + let single = (waitFor ArgResponse.request("topic", 1)).valueOr: + assert false, "request failed" + @[] + check single.len == 1 + check single[0].id == "topic-one-1" + check invoked == @["firsttopic"] + + ArgResponse.clearProviders() + + test "catches exception from providers and report error": + let firstHandler = NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + raise newException(ValueError, "first handler raised") + ) + + discard NoArgResponse.setProvider( + proc(): Future[Result[NoArgResponse, string]] {.async.} = + ok(NoArgResponse(label: "just ok")) + ) + + let afterException = waitFor NoArgResponse.request() + check afterException.isErr() + check afterException.error().contains("first handler raised") + + NoArgResponse.clearProviders() + + test "ref providers returning nil fail request": + 
DualResponse.clearProviders() + + test "supports native request types": + NativeIntResponse.clearProviders() + + discard NativeIntResponse.setProvider( + proc(): Future[Result[NativeIntResponse, string]] {.async.} = + ok(NativeIntResponse(1)) + ) + + discard NativeIntResponse.setProvider( + proc(): Future[Result[NativeIntResponse, string]] {.async.} = + ok(NativeIntResponse(2)) + ) + + let res = waitFor NativeIntResponse.request() + check res.isOk() + check res.get().len == 2 + check res.get().anyIt(int(it) == 1) + check res.get().anyIt(int(it) == 2) + + NativeIntResponse.clearProviders() + + test "supports external request types": + ExternalAliasResponse.clearProviders() + + discard ExternalAliasResponse.setProvider( + proc(): Future[Result[ExternalAliasResponse, string]] {.async.} = + ok(ExternalAliasResponse("hello")) + ) + + let res = waitFor ExternalAliasResponse.request() + check res.isOk() + check res.get().len == 1 + check ExternalBaseType(res.get()[0]) == "hello" + + ExternalAliasResponse.clearProviders() + + test "supports already-distinct request types": + AlreadyDistinctResponse.clearProviders() + + discard AlreadyDistinctResponse.setProvider( + proc(): Future[Result[AlreadyDistinctResponse, string]] {.async.} = + ok(AlreadyDistinctResponse(7)) + ) + + let res = waitFor AlreadyDistinctResponse.request() + check res.isOk() + check res.get().len == 1 + check int(res.get()[0]) == 7 + + AlreadyDistinctResponse.clearProviders() + + test "context-aware providers are isolated": + NoArgResponse.clearProviders() + let ctxA = NewBrokerContext() + let ctxB = NewBrokerContext() + + discard NoArgResponse.setProvider( + ctxA, + proc(): Future[Result[NoArgResponse, string]] {.async.} = + ok(NoArgResponse(label: "a")), + ) + discard NoArgResponse.setProvider( + ctxB, + proc(): Future[Result[NoArgResponse, string]] {.async.} = + ok(NoArgResponse(label: "b")), + ) + + let resA = waitFor NoArgResponse.request(ctxA) + check resA.isOk() + check resA.get().len == 1 + check 
resA.get()[0].label == "a" + + let resB = waitFor NoArgResponse.request(ctxB) + check resB.isOk() + check resB.get().len == 1 + check resB.get()[0].label == "b" + + let resDefault = waitFor NoArgResponse.request() + check resDefault.isOk() + check resDefault.get().len == 0 + + NoArgResponse.clearProviders(ctxA) + let clearedA = waitFor NoArgResponse.request(ctxA) + check clearedA.isOk() + check clearedA.get().len == 0 + + let stillB = waitFor NoArgResponse.request(ctxB) + check stillB.isOk() + check stillB.get().len == 1 + check stillB.get()[0].label == "b" + + NoArgResponse.clearProviders(ctxB) + + discard DualResponse.setProvider( + proc(): Future[Result[DualResponse, string]] {.async.} = + let nilResponse: DualResponse = nil + ok(nilResponse) + ) + + let zeroArg = waitFor DualResponse.request() + check zeroArg.isErr() + + DualResponse.clearProviders() + + discard DualResponse.setProvider( + proc(suffix: string): Future[Result[DualResponse, string]] {.async.} = + let nilResponse: DualResponse = nil + ok(nilResponse) + ) + + let withInput = waitFor DualResponse.request("-extra") + check withInput.isErr() + + DualResponse.clearProviders() diff --git a/tests/common/test_request_broker.nim b/tests/common/test_request_broker.nim new file mode 100644 index 000000000..87065a916 --- /dev/null +++ b/tests/common/test_request_broker.nim @@ -0,0 +1,665 @@ +{.used.} + +import testutils/unittests +import chronos +import std/strutils + +import waku/common/broker/request_broker + +## --------------------------------------------------------------------------- +## Async-mode brokers + tests +## --------------------------------------------------------------------------- + +RequestBroker: + type SimpleResponse = object + value*: string + + proc signatureFetch*(): Future[Result[SimpleResponse, string]] {.async.} + +RequestBroker: + type KeyedResponse = object + key*: string + payload*: string + + proc signatureFetchWithKey*( + key: string, subKey: int + ): 
Future[Result[KeyedResponse, string]] {.async.} + +RequestBroker: + type DualResponse = object + note*: string + count*: int + + proc signatureNoInput*(): Future[Result[DualResponse, string]] {.async.} + proc signatureWithInput*( + suffix: string + ): Future[Result[DualResponse, string]] {.async.} + +RequestBroker(async): + type ImplicitResponse = ref object + note*: string + +static: + doAssert typeof(SimpleResponse.request()) is Future[Result[SimpleResponse, string]] + +suite "RequestBroker macro (async mode)": + test "serves zero-argument providers": + check SimpleResponse + .setProvider( + proc(): Future[Result[SimpleResponse, string]] {.async.} = + ok(SimpleResponse(value: "hi")) + ) + .isOk() + + let res = waitFor SimpleResponse.request() + check res.isOk() + check res.value.value == "hi" + + SimpleResponse.clearProvider() + + test "zero-argument request errors when unset": + let res = waitFor SimpleResponse.request() + check res.isErr() + check res.error.contains("no zero-arg provider") + + test "serves input-based providers": + var seen: seq[string] = @[] + check KeyedResponse + .setProvider( + proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} = + seen.add(key) + ok(KeyedResponse(key: key, payload: key & "-payload+" & $subKey)) + ) + .isOk() + + let res = waitFor KeyedResponse.request("topic", 1) + check res.isOk() + check res.value.key == "topic" + check res.value.payload == "topic-payload+1" + check seen == @["topic"] + + KeyedResponse.clearProvider() + + test "catches provider exception": + check KeyedResponse + .setProvider( + proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} = + raise newException(ValueError, "simulated failure") + ) + .isOk() + + let res = waitFor KeyedResponse.request("neglected", 11) + check res.isErr() + check res.error.contains("simulated failure") + + KeyedResponse.clearProvider() + + test "input request errors when unset": + let res = waitFor 
KeyedResponse.request("foo", 2) + check res.isErr() + check res.error.contains("input signature") + + test "supports both provider types simultaneously": + check DualResponse + .setProvider( + proc(): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "base", count: 1)) + ) + .isOk() + + check DualResponse + .setProvider( + proc(suffix: string): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "base" & suffix, count: suffix.len)) + ) + .isOk() + + let noInput = waitFor DualResponse.request() + check noInput.isOk() + check noInput.value.note == "base" + + let withInput = waitFor DualResponse.request("-extra") + check withInput.isOk() + check withInput.value.note == "base-extra" + check withInput.value.count == 6 + + DualResponse.clearProvider() + + test "clearProvider resets both entries": + check DualResponse + .setProvider( + proc(): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "temp", count: 0)) + ) + .isOk() + DualResponse.clearProvider() + + let res = waitFor DualResponse.request() + check res.isErr() + + test "implicit zero-argument provider works by default": + check ImplicitResponse + .setProvider( + proc(): Future[Result[ImplicitResponse, string]] {.async.} = + ok(ImplicitResponse(note: "auto")) + ) + .isOk() + + let res = waitFor ImplicitResponse.request() + check res.isOk() + + ImplicitResponse.clearProvider() + check res.value.note == "auto" + + test "implicit zero-argument request errors when unset": + let res = waitFor ImplicitResponse.request() + check res.isErr() + check res.error.contains("no zero-arg provider") + + test "no provider override": + check DualResponse + .setProvider( + proc(): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "base", count: 1)) + ) + .isOk() + + check DualResponse + .setProvider( + proc(suffix: string): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "base" & suffix, count: suffix.len)) + ) + 
.isOk() + + let overrideProc = proc(): Future[Result[DualResponse, string]] {.async.} = + ok(DualResponse(note: "something else", count: 1)) + + check DualResponse.setProvider(overrideProc).isErr() + + let noInput = waitFor DualResponse.request() + check noInput.isOk() + check noInput.value.note == "base" + + let stillResponse = waitFor DualResponse.request(" still works") + check stillResponse.isOk() + check stillResponse.value.note.contains("base still works") + + DualResponse.clearProvider() + + let noResponse = waitFor DualResponse.request() + check noResponse.isErr() + check noResponse.error.contains("no zero-arg provider") + + let noResponseArg = waitFor DualResponse.request("Should not work") + check noResponseArg.isErr() + check noResponseArg.error.contains("no provider") + + check DualResponse.setProvider(overrideProc).isOk() + + let nowSuccWithOverride = waitFor DualResponse.request() + check nowSuccWithOverride.isOk() + check nowSuccWithOverride.value.note == "something else" + check nowSuccWithOverride.value.count == 1 + + DualResponse.clearProvider() + + test "supports keyed providers (async, zero-arg)": + SimpleResponse.clearProvider() + + check SimpleResponse + .setProvider( + proc(): Future[Result[SimpleResponse, string]] {.async.} = + ok(SimpleResponse(value: "default")) + ) + .isOk() + + check SimpleResponse + .setProvider( + BrokerContext(0x11111111'u32), + proc(): Future[Result[SimpleResponse, string]] {.async.} = + ok(SimpleResponse(value: "one")), + ) + .isOk() + + check SimpleResponse + .setProvider( + BrokerContext(0x22222222'u32), + proc(): Future[Result[SimpleResponse, string]] {.async.} = + ok(SimpleResponse(value: "two")), + ) + .isOk() + + let defaultRes = waitFor SimpleResponse.request() + check defaultRes.isOk() + check defaultRes.value.value == "default" + + let res1 = waitFor SimpleResponse.request(BrokerContext(0x11111111'u32)) + check res1.isOk() + check res1.value.value == "one" + + let res2 = waitFor 
SimpleResponse.request(BrokerContext(0x22222222'u32)) + check res2.isOk() + check res2.value.value == "two" + + let missing = waitFor SimpleResponse.request(BrokerContext(0x33333333'u32)) + check missing.isErr() + check missing.error.contains("no provider registered for broker context") + + check SimpleResponse + .setProvider( + BrokerContext(0x11111111'u32), + proc(): Future[Result[SimpleResponse, string]] {.async.} = + ok(SimpleResponse(value: "dup")), + ) + .isErr() + + SimpleResponse.clearProvider() + + test "supports keyed providers (async, with args)": + KeyedResponse.clearProvider() + + check KeyedResponse + .setProvider( + proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} = + ok(KeyedResponse(key: "default-" & key, payload: $subKey)) + ) + .isOk() + + check KeyedResponse + .setProvider( + BrokerContext(0xABCDEF01'u32), + proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} = + ok(KeyedResponse(key: "k1-" & key, payload: "p" & $subKey)), + ) + .isOk() + + check KeyedResponse + .setProvider( + BrokerContext(0xABCDEF02'u32), + proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} = + ok(KeyedResponse(key: "k2-" & key, payload: "q" & $subKey)), + ) + .isOk() + + let d = waitFor KeyedResponse.request("topic", 7) + check d.isOk() + check d.value.key == "default-topic" + + let k1 = waitFor KeyedResponse.request(BrokerContext(0xABCDEF01'u32), "topic", 7) + check k1.isOk() + check k1.value.key == "k1-topic" + check k1.value.payload == "p7" + + let k2 = waitFor KeyedResponse.request(BrokerContext(0xABCDEF02'u32), "topic", 7) + check k2.isOk() + check k2.value.key == "k2-topic" + check k2.value.payload == "q7" + + let miss = waitFor KeyedResponse.request(BrokerContext(0xDEADBEEF'u32), "topic", 7) + check miss.isErr() + check miss.error.contains("no provider registered for broker context") + + KeyedResponse.clearProvider() + +## 
--------------------------------------------------------------------------- +## Sync-mode brokers + tests +## --------------------------------------------------------------------------- + +RequestBroker(sync): + type SimpleResponseSync = object + value*: string + + proc signatureFetch*(): Result[SimpleResponseSync, string] + +RequestBroker(sync): + type KeyedResponseSync = object + key*: string + payload*: string + + proc signatureFetchWithKey*( + key: string, subKey: int + ): Result[KeyedResponseSync, string] + +RequestBroker(sync): + type DualResponseSync = object + note*: string + count*: int + + proc signatureNoInput*(): Result[DualResponseSync, string] + proc signatureWithInput*(suffix: string): Result[DualResponseSync, string] + +RequestBroker(sync): + type ImplicitResponseSync = ref object + note*: string + +static: + doAssert typeof(SimpleResponseSync.request()) is Result[SimpleResponseSync, string] + doAssert not ( + typeof(SimpleResponseSync.request()) is Future[Result[SimpleResponseSync, string]] + ) + doAssert typeof(KeyedResponseSync.request("topic", 1)) is + Result[KeyedResponseSync, string] + +suite "RequestBroker macro (sync mode)": + test "serves zero-argument providers (sync)": + check SimpleResponseSync + .setProvider( + proc(): Result[SimpleResponseSync, string] = + ok(SimpleResponseSync(value: "hi")) + ) + .isOk() + + let res = SimpleResponseSync.request() + check res.isOk() + check res.value.value == "hi" + + SimpleResponseSync.clearProvider() + + test "zero-argument request errors when unset (sync)": + let res = SimpleResponseSync.request() + check res.isErr() + check res.error.contains("no zero-arg provider") + + test "serves input-based providers (sync)": + var seen: seq[string] = @[] + check KeyedResponseSync + .setProvider( + proc(key: string, subKey: int): Result[KeyedResponseSync, string] = + seen.add(key) + ok(KeyedResponseSync(key: key, payload: key & "-payload+" & $subKey)) + ) + .isOk() + + let res = 
KeyedResponseSync.request("topic", 1) + check res.isOk() + check res.value.key == "topic" + check res.value.payload == "topic-payload+1" + check seen == @["topic"] + + KeyedResponseSync.clearProvider() + + test "catches provider exception (sync)": + check KeyedResponseSync + .setProvider( + proc(key: string, subKey: int): Result[KeyedResponseSync, string] = + raise newException(ValueError, "simulated failure") + ) + .isOk() + + let res = KeyedResponseSync.request("neglected", 11) + check res.isErr() + check res.error.contains("simulated failure") + + KeyedResponseSync.clearProvider() + + test "input request errors when unset (sync)": + let res = KeyedResponseSync.request("foo", 2) + check res.isErr() + check res.error.contains("input signature") + + test "supports both provider types simultaneously (sync)": + check DualResponseSync + .setProvider( + proc(): Result[DualResponseSync, string] = + ok(DualResponseSync(note: "base", count: 1)) + ) + .isOk() + + check DualResponseSync + .setProvider( + proc(suffix: string): Result[DualResponseSync, string] = + ok(DualResponseSync(note: "base" & suffix, count: suffix.len)) + ) + .isOk() + + let noInput = DualResponseSync.request() + check noInput.isOk() + check noInput.value.note == "base" + + let withInput = DualResponseSync.request("-extra") + check withInput.isOk() + check withInput.value.note == "base-extra" + check withInput.value.count == 6 + + DualResponseSync.clearProvider() + + test "clearProvider resets both entries (sync)": + check DualResponseSync + .setProvider( + proc(): Result[DualResponseSync, string] = + ok(DualResponseSync(note: "temp", count: 0)) + ) + .isOk() + DualResponseSync.clearProvider() + + let res = DualResponseSync.request() + check res.isErr() + + test "implicit zero-argument provider works by default (sync)": + check ImplicitResponseSync + .setProvider( + proc(): Result[ImplicitResponseSync, string] = + ok(ImplicitResponseSync(note: "auto")) + ) + .isOk() + + let res = 
ImplicitResponseSync.request() + check res.isOk() + + ImplicitResponseSync.clearProvider() + check res.value.note == "auto" + + test "implicit zero-argument request errors when unset (sync)": + let res = ImplicitResponseSync.request() + check res.isErr() + check res.error.contains("no zero-arg provider") + + test "implicit zero-argument provider raises error (sync)": + check ImplicitResponseSync + .setProvider( + proc(): Result[ImplicitResponseSync, string] = + raise newException(ValueError, "simulated failure") + ) + .isOk() + + let res = ImplicitResponseSync.request() + check res.isErr() + check res.error.contains("simulated failure") + + ImplicitResponseSync.clearProvider() + + test "supports keyed providers (sync, zero-arg)": + SimpleResponseSync.clearProvider() + + check SimpleResponseSync + .setProvider( + proc(): Result[SimpleResponseSync, string] = + ok(SimpleResponseSync(value: "default")) + ) + .isOk() + + check SimpleResponseSync + .setProvider( + BrokerContext(0x10101010'u32), + proc(): Result[SimpleResponseSync, string] = + ok(SimpleResponseSync(value: "ten")), + ) + .isOk() + + let defaultRes = SimpleResponseSync.request() + check defaultRes.isOk() + check defaultRes.value.value == "default" + + let keyedRes = SimpleResponseSync.request(BrokerContext(0x10101010'u32)) + check keyedRes.isOk() + check keyedRes.value.value == "ten" + + let miss = SimpleResponseSync.request(BrokerContext(0x20202020'u32)) + check miss.isErr() + check miss.error.contains("no provider registered for broker context") + + SimpleResponseSync.clearProvider() + + test "supports keyed providers (sync, with args)": + KeyedResponseSync.clearProvider() + + check KeyedResponseSync + .setProvider( + proc(key: string, subKey: int): Result[KeyedResponseSync, string] = + ok(KeyedResponseSync(key: "default-" & key, payload: $subKey)) + ) + .isOk() + + check KeyedResponseSync + .setProvider( + BrokerContext(0xA0A0A0A0'u32), + proc(key: string, subKey: int): Result[KeyedResponseSync, string] 
= + ok(KeyedResponseSync(key: "k-" & key, payload: "p" & $subKey)), + ) + .isOk() + + let d = KeyedResponseSync.request("topic", 2) + check d.isOk() + check d.value.key == "default-topic" + + let keyed = KeyedResponseSync.request(BrokerContext(0xA0A0A0A0'u32), "topic", 2) + check keyed.isOk() + check keyed.value.key == "k-topic" + check keyed.value.payload == "p2" + + let miss = KeyedResponseSync.request(BrokerContext(0xB0B0B0B0'u32), "topic", 2) + check miss.isErr() + check miss.error.contains("no provider registered for broker context") + + KeyedResponseSync.clearProvider() + +## --------------------------------------------------------------------------- +## POD / external type brokers + tests (distinct/alias behavior) +## --------------------------------------------------------------------------- + +type ExternalDefinedTypeAsync = object + label*: string + +type ExternalDefinedTypeSync = object + label*: string + +type ExternalDefinedTypeShared = object + label*: string + +RequestBroker: + type PodResponse = int + + proc signatureFetch*(): Future[Result[PodResponse, string]] {.async.} + +RequestBroker: + type ExternalAliasedResponse = ExternalDefinedTypeAsync + + proc signatureFetch*(): Future[Result[ExternalAliasedResponse, string]] {.async.} + +RequestBroker(sync): + type ExternalAliasedResponseSync = ExternalDefinedTypeSync + + proc signatureFetch*(): Result[ExternalAliasedResponseSync, string] + +RequestBroker(sync): + type DistinctStringResponseA = distinct string + +RequestBroker(sync): + type DistinctStringResponseB = distinct string + +RequestBroker(sync): + type ExternalDistinctResponseA = distinct ExternalDefinedTypeShared + +RequestBroker(sync): + type ExternalDistinctResponseB = distinct ExternalDefinedTypeShared + +suite "RequestBroker macro (POD/external types)": + test "supports non-object response types (async)": + check PodResponse + .setProvider( + proc(): Future[Result[PodResponse, string]] {.async.} = + ok(PodResponse(123)) + ) + .isOk() + + 
let res = waitFor PodResponse.request() + check res.isOk() + check int(res.value) == 123 + + PodResponse.clearProvider() + + test "supports aliased external types (async)": + check ExternalAliasedResponse + .setProvider( + proc(): Future[Result[ExternalAliasedResponse, string]] {.async.} = + ok(ExternalAliasedResponse(ExternalDefinedTypeAsync(label: "ext"))) + ) + .isOk() + + let res = waitFor ExternalAliasedResponse.request() + check res.isOk() + check ExternalDefinedTypeAsync(res.value).label == "ext" + + ExternalAliasedResponse.clearProvider() + + test "supports aliased external types (sync)": + check ExternalAliasedResponseSync + .setProvider( + proc(): Result[ExternalAliasedResponseSync, string] = + ok(ExternalAliasedResponseSync(ExternalDefinedTypeSync(label: "ext"))) + ) + .isOk() + + let res = ExternalAliasedResponseSync.request() + check res.isOk() + check ExternalDefinedTypeSync(res.value).label == "ext" + + ExternalAliasedResponseSync.clearProvider() + + test "distinct response types avoid overload ambiguity (sync)": + check DistinctStringResponseA + .setProvider( + proc(): Result[DistinctStringResponseA, string] = + ok(DistinctStringResponseA("a")) + ) + .isOk() + + check DistinctStringResponseB + .setProvider( + proc(): Result[DistinctStringResponseB, string] = + ok(DistinctStringResponseB("b")) + ) + .isOk() + + check ExternalDistinctResponseA + .setProvider( + proc(): Result[ExternalDistinctResponseA, string] = + ok(ExternalDistinctResponseA(ExternalDefinedTypeShared(label: "ea"))) + ) + .isOk() + + check ExternalDistinctResponseB + .setProvider( + proc(): Result[ExternalDistinctResponseB, string] = + ok(ExternalDistinctResponseB(ExternalDefinedTypeShared(label: "eb"))) + ) + .isOk() + + let resA = DistinctStringResponseA.request() + let resB = DistinctStringResponseB.request() + check resA.isOk() + check resB.isOk() + check string(resA.value) == "a" + check string(resB.value) == "b" + + let resEA = ExternalDistinctResponseA.request() + let resEB = 
ExternalDistinctResponseB.request() + check resEA.isOk() + check resEB.isOk() + check ExternalDefinedTypeShared(resEA.value).label == "ea" + check ExternalDefinedTypeShared(resEB.value).label == "eb" + + DistinctStringResponseA.clearProvider() + DistinctStringResponseB.clearProvider() + ExternalDistinctResponseA.clearProvider() + ExternalDistinctResponseB.clearProvider() diff --git a/tests/common/test_tokenbucket.nim b/tests/common/test_tokenbucket.nim deleted file mode 100644 index 5bc1a0583..000000000 --- a/tests/common/test_tokenbucket.nim +++ /dev/null @@ -1,69 +0,0 @@ -# Chronos Test Suite -# (c) Copyright 2022-Present -# Status Research & Development GmbH -# -# Licensed under either of -# Apache License, version 2.0, (LICENSE-APACHEv2) -# MIT license (LICENSE-MIT) - -{.used.} - -import testutils/unittests -import chronos -import ../../waku/common/rate_limit/token_bucket - -suite "Token Bucket": - test "TokenBucket Sync test - strict": - var bucket = TokenBucket.newStrict(1000, 1.milliseconds) - let - start = Moment.now() - fullTime = start + 1.milliseconds - check: - bucket.tryConsume(800, start) == true - bucket.tryConsume(200, start) == true - # Out of budget - bucket.tryConsume(100, start) == false - bucket.tryConsume(800, fullTime) == true - bucket.tryConsume(200, fullTime) == true - # Out of budget - bucket.tryConsume(100, fullTime) == false - - test "TokenBucket Sync test - compensating": - var bucket = TokenBucket.new(1000, 1.milliseconds) - let - start = Moment.now() - fullTime = start + 1.milliseconds - check: - bucket.tryConsume(800, start) == true - bucket.tryConsume(200, start) == true - # Out of budget - bucket.tryConsume(100, start) == false - bucket.tryConsume(800, fullTime) == true - bucket.tryConsume(200, fullTime) == true - # Due not using the bucket for a full period the compensation will satisfy this request - bucket.tryConsume(100, fullTime) == true - - test "TokenBucket Max compensation": - var bucket = TokenBucket.new(1000, 1.minutes) - 
var reqTime = Moment.now() - - check bucket.tryConsume(1000, reqTime) - check bucket.tryConsume(1, reqTime) == false - reqTime += 1.minutes - check bucket.tryConsume(500, reqTime) == true - reqTime += 1.minutes - check bucket.tryConsume(1000, reqTime) == true - reqTime += 10.seconds - # max compensation is 25% so try to consume 250 more - check bucket.tryConsume(250, reqTime) == true - reqTime += 49.seconds - # out of budget within the same period - check bucket.tryConsume(1, reqTime) == false - - test "TokenBucket Short replenish": - var bucket = TokenBucket.new(15000, 1.milliseconds) - let start = Moment.now() - check bucket.tryConsume(15000, start) - check bucket.tryConsume(1, start) == false - - check bucket.tryConsume(15000, start + 1.milliseconds) == true diff --git a/tests/node/test_all.nim b/tests/node/test_all.nim index f6e7507b7..fe785dee2 100644 --- a/tests/node/test_all.nim +++ b/tests/node/test_all.nim @@ -7,4 +7,5 @@ import ./test_wakunode_peer_exchange, ./test_wakunode_store, ./test_wakunode_legacy_store, - ./test_wakunode_peer_manager + ./test_wakunode_peer_manager, + ./test_wakunode_health_monitor diff --git a/tests/node/test_wakunode_health_monitor.nim b/tests/node/test_wakunode_health_monitor.nim new file mode 100644 index 000000000..8be9c444d --- /dev/null +++ b/tests/node/test_wakunode_health_monitor.nim @@ -0,0 +1,301 @@ +{.used.} + +import + std/[json, options, sequtils, strutils, tables], testutils/unittests, chronos, results + +import + waku/[ + waku_core, + common/waku_protocol, + node/waku_node, + node/peer_manager, + node/health_monitor/health_status, + node/health_monitor/connection_status, + node/health_monitor/protocol_health, + node/health_monitor/node_health_monitor, + node/kernel_api/relay, + node/kernel_api/store, + node/kernel_api/lightpush, + node/kernel_api/filter, + waku_archive, + ] + +import ../testlib/[wakunode, wakucore], ../waku_archive/archive_utils + +const MockDLow = 4 # Mocked GossipSub DLow value + +const 
TestConnectivityTimeLimit = 3.seconds + +proc protoHealthMock(kind: WakuProtocol, health: HealthStatus): ProtocolHealth = + var ph = ProtocolHealth.init(kind) + if health == HealthStatus.READY: + return ph.ready() + else: + return ph.notReady("mock") + +suite "Health Monitor - health state calculation": + test "Disconnected, zero peers": + let protocols = + @[ + protoHealthMock(RelayProtocol, HealthStatus.NOT_READY), + protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY), + protoHealthMock(FilterClientProtocol, HealthStatus.NOT_READY), + protoHealthMock(LightpushClientProtocol, HealthStatus.NOT_READY), + ] + let strength = initTable[WakuProtocol, int]() + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.Disconnected + + test "PartiallyConnected, weak relay": + let weakCount = MockDLow - 1 + let protocols = @[protoHealthMock(RelayProtocol, HealthStatus.READY)] + var strength = initTable[WakuProtocol, int]() + strength[RelayProtocol] = weakCount + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + # Partially connected since relay connectivity is weak (> 0, but < dLow) + check state == ConnectionStatus.PartiallyConnected + + test "Connected, robust relay": + let protocols = @[protoHealthMock(RelayProtocol, HealthStatus.READY)] + var strength = initTable[WakuProtocol, int]() + strength[RelayProtocol] = MockDLow + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + # Fully connected since relay connectivity is ideal (>= dLow) + check state == ConnectionStatus.Connected + + test "Connected, robust edge": + let protocols = + @[ + protoHealthMock(RelayProtocol, HealthStatus.NOT_MOUNTED), + protoHealthMock(LightpushClientProtocol, HealthStatus.READY), + protoHealthMock(FilterClientProtocol, HealthStatus.READY), + protoHealthMock(StoreClientProtocol, HealthStatus.READY), + ] + var strength = initTable[WakuProtocol, int]() + strength[LightpushClientProtocol] = 
HealthyThreshold + strength[FilterClientProtocol] = HealthyThreshold + strength[StoreClientProtocol] = HealthyThreshold + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.Connected + + test "Disconnected, edge missing store": + let protocols = + @[ + protoHealthMock(LightpushClientProtocol, HealthStatus.READY), + protoHealthMock(FilterClientProtocol, HealthStatus.READY), + protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY), + ] + var strength = initTable[WakuProtocol, int]() + strength[LightpushClientProtocol] = HealthyThreshold + strength[FilterClientProtocol] = HealthyThreshold + strength[StoreClientProtocol] = 0 + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.Disconnected + + test "PartiallyConnected, edge meets minimum failover requirement": + let weakCount = max(1, HealthyThreshold - 1) + let protocols = + @[ + protoHealthMock(LightpushClientProtocol, HealthStatus.READY), + protoHealthMock(FilterClientProtocol, HealthStatus.READY), + protoHealthMock(StoreClientProtocol, HealthStatus.READY), + ] + var strength = initTable[WakuProtocol, int]() + strength[LightpushClientProtocol] = weakCount + strength[FilterClientProtocol] = weakCount + strength[StoreClientProtocol] = weakCount + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.PartiallyConnected + + test "Connected, robust relay ignores store server": + let protocols = + @[ + protoHealthMock(RelayProtocol, HealthStatus.READY), + protoHealthMock(StoreProtocol, HealthStatus.READY), + ] + var strength = initTable[WakuProtocol, int]() + strength[RelayProtocol] = MockDLow + strength[StoreProtocol] = 0 + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.Connected + + test "Connected, robust relay ignores store client": + let protocols = + @[ + protoHealthMock(RelayProtocol, 
HealthStatus.READY), + protoHealthMock(StoreProtocol, HealthStatus.READY), + protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY), + ] + var strength = initTable[WakuProtocol, int]() + strength[RelayProtocol] = MockDLow + strength[StoreProtocol] = 0 + strength[StoreClientProtocol] = 0 + let state = calculateConnectionState(protocols, strength, some(MockDLow)) + check state == ConnectionStatus.Connected + +suite "Health Monitor - events": + asyncTest "Core (relay) health update": + let + nodeAKey = generateSecp256k1Key() + nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0)) + + (await nodeA.mountRelay()).expect("Node A failed to mount Relay") + + await nodeA.start() + + let monitorA = NodeHealthMonitor.new(nodeA) + + var + lastStatus = ConnectionStatus.Disconnected + callbackCount = 0 + healthChangeSignal = newAsyncEvent() + + monitorA.onConnectionStatusChange = proc(status: ConnectionStatus) {.async.} = + lastStatus = status + callbackCount.inc() + healthChangeSignal.fire() + + monitorA.startHealthMonitor().expect("Health monitor failed to start") + + let + nodeBKey = generateSecp256k1Key() + nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0)) + + let driver = newSqliteArchiveDriver() + nodeB.mountArchive(driver).expect("Node B failed to mount archive") + + (await nodeB.mountRelay()).expect("Node B failed to mount relay") + await nodeB.mountStore() + + await nodeB.start() + + await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()]) + + proc dummyHandler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async.} = + discard + + nodeA.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), dummyHandler).expect( + "Node A failed to subscribe" + ) + nodeB.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), dummyHandler).expect( + "Node B failed to subscribe" + ) + + let connectTimeLimit = Moment.now() + TestConnectivityTimeLimit + var gotConnected = false + + while Moment.now() < connectTimeLimit: + 
if lastStatus == ConnectionStatus.PartiallyConnected: + gotConnected = true + break + + if await healthChangeSignal.wait().withTimeout(connectTimeLimit - Moment.now()): + healthChangeSignal.clear() + + check: + gotConnected == true + callbackCount >= 1 + lastStatus == ConnectionStatus.PartiallyConnected + + healthChangeSignal.clear() + + await nodeB.stop() + await nodeA.disconnectNode(nodeB.switch.peerInfo.toRemotePeerInfo()) + + let disconnectTimeLimit = Moment.now() + TestConnectivityTimeLimit + var gotDisconnected = false + + while Moment.now() < disconnectTimeLimit: + if lastStatus == ConnectionStatus.Disconnected: + gotDisconnected = true + break + + if await healthChangeSignal.wait().withTimeout(disconnectTimeLimit - Moment.now()): + healthChangeSignal.clear() + + check: + gotDisconnected == true + + await monitorA.stopHealthMonitor() + await nodeA.stop() + + asyncTest "Edge (light client) health update": + let + nodeAKey = generateSecp256k1Key() + nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0)) + + nodeA.mountLightpushClient() + await nodeA.mountFilterClient() + nodeA.mountStoreClient() + + await nodeA.start() + + let monitorA = NodeHealthMonitor.new(nodeA) + + var + lastStatus = ConnectionStatus.Disconnected + callbackCount = 0 + healthChangeSignal = newAsyncEvent() + + monitorA.onConnectionStatusChange = proc(status: ConnectionStatus) {.async.} = + lastStatus = status + callbackCount.inc() + healthChangeSignal.fire() + + monitorA.startHealthMonitor().expect("Health monitor failed to start") + + let + nodeBKey = generateSecp256k1Key() + nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0)) + + let driver = newSqliteArchiveDriver() + nodeB.mountArchive(driver).expect("Node B failed to mount archive") + + (await nodeB.mountRelay()).expect("Node B failed to mount relay") + + (await nodeB.mountLightpush()).expect("Node B failed to mount lightpush") + await nodeB.mountFilter() + await nodeB.mountStore() + + await 
nodeB.start() + + await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()]) + + let connectTimeLimit = Moment.now() + TestConnectivityTimeLimit + var gotConnected = false + + while Moment.now() < connectTimeLimit: + if lastStatus == ConnectionStatus.PartiallyConnected: + gotConnected = true + break + + if await healthChangeSignal.wait().withTimeout(connectTimeLimit - Moment.now()): + healthChangeSignal.clear() + + check: + gotConnected == true + callbackCount >= 1 + lastStatus == ConnectionStatus.PartiallyConnected + + healthChangeSignal.clear() + + await nodeB.stop() + await nodeA.disconnectNode(nodeB.switch.peerInfo.toRemotePeerInfo()) + + let disconnectTimeLimit = Moment.now() + TestConnectivityTimeLimit + var gotDisconnected = false + + while Moment.now() < disconnectTimeLimit: + if lastStatus == ConnectionStatus.Disconnected: + gotDisconnected = true + break + + if await healthChangeSignal.wait().withTimeout(disconnectTimeLimit - Moment.now()): + healthChangeSignal.clear() + + check: + gotDisconnected == true + lastStatus == ConnectionStatus.Disconnected + + await monitorA.stopHealthMonitor() + await nodeA.stop() diff --git a/tests/node/test_wakunode_legacy_lightpush.nim b/tests/node/test_wakunode_legacy_lightpush.nim index a51ba60b9..902464bcd 100644 --- a/tests/node/test_wakunode_legacy_lightpush.nim +++ b/tests/node/test_wakunode_legacy_lightpush.nim @@ -13,6 +13,7 @@ import node/peer_manager, node/waku_node, node/kernel_api, + node/kernel_api/lightpush, waku_lightpush_legacy, waku_lightpush_legacy/common, waku_lightpush_legacy/protocol_metrics, @@ -24,9 +25,6 @@ import suite "Waku Legacy Lightpush - End To End": var - handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] - handler {.threadvar.}: PushMessageHandler - server {.threadvar.}: WakuNode client {.threadvar.}: WakuNode @@ -36,13 +34,6 @@ suite "Waku Legacy Lightpush - End To End": message {.threadvar.}: WakuMessage asyncSetup: - handlerFuture = newPushHandlerFuture() - 
handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = - handlerFuture.complete((pubsubTopic, message)) - return ok() - let serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() @@ -56,7 +47,7 @@ suite "Waku Legacy Lightpush - End To End": (await server.mountRelay()).isOkOr: assert false, "Failed to mount relay" - await server.mountLegacyLightpush() # without rln-relay + check (await server.mountLegacyLightpush()).isOk() # without rln-relay client.mountLegacyLightpushClient() serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() @@ -107,9 +98,6 @@ suite "Waku Legacy Lightpush - End To End": suite "RLN Proofs as a Lightpush Service": var - handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] - handler {.threadvar.}: PushMessageHandler - server {.threadvar.}: WakuNode client {.threadvar.}: WakuNode anvilProc {.threadvar.}: Process @@ -121,13 +109,6 @@ suite "RLN Proofs as a Lightpush Service": message {.threadvar.}: WakuMessage asyncSetup: - handlerFuture = newPushHandlerFuture() - handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult[void]] {.async.} = - handlerFuture.complete((pubsubTopic, message)) - return ok() - let serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() @@ -135,8 +116,8 @@ suite "RLN Proofs as a Lightpush Service": server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) # mount rln-relay let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) @@ -147,7 +128,7 @@ suite "RLN Proofs as a Lightpush Service": (await server.mountRelay()).isOkOr: assert 
false, "Failed to mount relay" await server.mountRlnRelay(wakuRlnConfig) - await server.mountLegacyLightPush() + check (await server.mountLegacyLightPush()).isOk() client.mountLegacyLightPushClient() let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) @@ -213,7 +194,7 @@ suite "Waku Legacy Lightpush message delivery": assert false, "Failed to mount relay" (await bridgeNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" - await bridgeNode.mountLegacyLightPush() + check (await bridgeNode.mountLegacyLightPush()).isOk() lightNode.mountLegacyLightPushClient() discard await lightNode.peerManager.dialPeer( @@ -249,3 +230,19 @@ suite "Waku Legacy Lightpush message delivery": ## Cleanup await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) + +suite "Waku Legacy Lightpush mounting behavior": + asyncTest "fails to mount when relay is not mounted": + ## Given a node without Relay mounted + let + key = generateSecp256k1Key() + node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0)) + + # Do not mount Relay on purpose + check node.wakuRelay.isNil() + + ## Then mounting Legacy Lightpush must fail + let res = await node.mountLegacyLightPush() + check: + res.isErr() + res.error == MountWithoutRelayError diff --git a/tests/node/test_wakunode_lightpush.nim b/tests/node/test_wakunode_lightpush.nim index 12bfdddd8..66b87b85e 100644 --- a/tests/node/test_wakunode_lightpush.nim +++ b/tests/node/test_wakunode_lightpush.nim @@ -13,6 +13,7 @@ import node/peer_manager, node/waku_node, node/kernel_api, + node/kernel_api/lightpush, waku_lightpush, waku_rln_relay, ], @@ -36,13 +37,6 @@ suite "Waku Lightpush - End To End": message {.threadvar.}: WakuMessage asyncSetup: - handlerFuture = newPushHandlerFuture() - handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult] {.async.} = - handlerFuture.complete((pubsubTopic, message)) - return ok(PublishedToOnePeer) - let serverKey = 
generateSecp256k1Key() clientKey = generateSecp256k1Key() @@ -55,7 +49,7 @@ suite "Waku Lightpush - End To End": (await server.mountRelay()).isOkOr: assert false, "Failed to mount relay" - await server.mountLightpush() # without rln-relay + check (await server.mountLightpush()).isOk() # without rln-relay client.mountLightpushClient() serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() @@ -107,9 +101,6 @@ suite "Waku Lightpush - End To End": suite "RLN Proofs as a Lightpush Service": var - handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] - handler {.threadvar.}: PushMessageHandler - server {.threadvar.}: WakuNode client {.threadvar.}: WakuNode anvilProc {.threadvar.}: Process @@ -121,13 +112,6 @@ suite "RLN Proofs as a Lightpush Service": message {.threadvar.}: WakuMessage asyncSetup: - handlerFuture = newPushHandlerFuture() - handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage - ): Future[WakuLightPushResult] {.async.} = - handlerFuture.complete((pubsubTopic, message)) - return ok(PublishedToOnePeer) - let serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() @@ -135,8 +119,8 @@ suite "RLN Proofs as a Lightpush Service": server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) # mount rln-relay let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) @@ -147,7 +131,7 @@ suite "RLN Proofs as a Lightpush Service": (await server.mountRelay()).isOkOr: assert false, "Failed to mount relay" await server.mountRlnRelay(wakuRlnConfig) - await server.mountLightPush() + check (await server.mountLightPush()).isOk() client.mountLightPushClient() let manager1 = 
cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) @@ -213,7 +197,7 @@ suite "Waku Lightpush message delivery": assert false, "Failed to mount relay" (await bridgeNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" - await bridgeNode.mountLightPush() + check (await bridgeNode.mountLightPush()).isOk() lightNode.mountLightPushClient() discard await lightNode.peerManager.dialPeer( @@ -251,3 +235,19 @@ suite "Waku Lightpush message delivery": ## Cleanup await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) + +suite "Waku Lightpush mounting behavior": + asyncTest "fails to mount when relay is not mounted": + ## Given a node without Relay mounted + let + key = generateSecp256k1Key() + node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0)) + + # Do not mount Relay on purpose + check node.wakuRelay.isNil() + + ## Then mounting Lightpush must fail + let res = await node.mountLightPush() + check: + res.isErr() + res.error == MountWithoutRelayError diff --git a/tests/node/test_wakunode_sharding.nim b/tests/node/test_wakunode_sharding.nim index eefd8f06e..261077e36 100644 --- a/tests/node/test_wakunode_sharding.nim +++ b/tests/node/test_wakunode_sharding.nim @@ -282,7 +282,7 @@ suite "Sharding": asyncTest "lightpush": # Given a connected server and client subscribed to the same pubsub topic client.mountLegacyLightPushClient() - await server.mountLightpush() + check (await server.mountLightpush()).isOk() let topic = "/waku/2/rs/0/1" @@ -405,7 +405,7 @@ suite "Sharding": asyncTest "lightpush (automatic sharding filtering)": # Given a connected server and client using the same content topic (with two different formats) client.mountLegacyLightPushClient() - await server.mountLightpush() + check (await server.mountLightpush()).isOk() let contentTopicShort = "/toychat/2/huilong/proto" @@ -563,7 +563,7 @@ suite "Sharding": asyncTest "lightpush - exclusion (automatic sharding filtering)": # Given a connected server and client using 
different content topics client.mountLegacyLightPushClient() - await server.mountLightpush() + check (await server.mountLightpush()).isOk() let contentTopic1 = "/toychat/2/huilong/proto" @@ -874,7 +874,7 @@ suite "Sharding": asyncTest "Waku LightPush Sharding (Static Sharding)": # Given a connected server and client using two different pubsub topics client.mountLegacyLightPushClient() - await server.mountLightpush() + check (await server.mountLightpush()).isOk() # Given a connected server and client subscribed to multiple pubsub topics let diff --git a/tests/test_peer_manager.nim b/tests/test_peer_manager.nim index 1369f3f88..c96f21b6e 100644 --- a/tests/test_peer_manager.nim +++ b/tests/test_peer_manager.nim @@ -997,6 +997,7 @@ procSuite "Peer Manager": .build(), maxFailedAttempts = 1, storage = nil, + maxConnections = 20, ) # Create 30 peers and add them to the peerstore @@ -1063,6 +1064,7 @@ procSuite "Peer Manager": backoffFactor = 2, maxFailedAttempts = 10, storage = nil, + maxConnections = 20, ) var p1: PeerId require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" & "1") @@ -1116,6 +1118,7 @@ procSuite "Peer Manager": .build(), maxFailedAttempts = 150, storage = nil, + maxConnections = 20, ) # Should result in backoff > 1 week @@ -1131,6 +1134,7 @@ procSuite "Peer Manager": .build(), maxFailedAttempts = 10, storage = nil, + maxConnections = 20, ) let pm = PeerManager.new( @@ -1144,6 +1148,7 @@ procSuite "Peer Manager": .build(), maxFailedAttempts = 5, storage = nil, + maxConnections = 20, ) asyncTest "colocationLimit is enforced by pruneConnsByIp()": @@ -1202,3 +1207,233 @@ procSuite "Peer Manager": r = node1.peerManager.selectPeer(WakuPeerExchangeCodec) assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" + + asyncTest "selectPeer() filters peers by shard using ENR": + ## Given: A peer manager with 3 peers having different shards in their ENRs + let + clusterId = 0.uint16 + shardId0 = 0.uint16 + shardId1 = 1.uint16 + + # 
Create 3 nodes with different shards + let nodes = + @[ + newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[shardId0], + ), + newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[shardId1], + ), + newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[shardId0], + ), + ] + + await allFutures(nodes.mapIt(it.start())) + for node in nodes: + discard await node.mountRelay() + + # Get peer infos with ENRs + let peerInfos = collect: + for node in nodes: + var peerInfo = node.switch.peerInfo.toRemotePeerInfo() + peerInfo.enr = some(node.enr) + peerInfo + + # Add all peers to node 0's peer manager and peerstore + for i in 1 .. 2: + nodes[0].peerManager.addPeer(peerInfos[i]) + nodes[0].peerManager.switch.peerStore[AddressBook][peerInfos[i].peerId] = + peerInfos[i].addrs + nodes[0].peerManager.switch.peerStore[ProtoBook][peerInfos[i].peerId] = + @[WakuRelayCodec] + + ## When: We select a peer for shard 0 + let shard0Topic = some(PubsubTopic("/waku/2/rs/0/0")) + let selectedPeer0 = nodes[0].peerManager.selectPeer(WakuRelayCodec, shard0Topic) + + ## Then: Only peers supporting shard 0 are considered (nodes 2, not node 1) + check: + selectedPeer0.isSome() + selectedPeer0.get().peerId != peerInfos[1].peerId # node1 has shard 1 + selectedPeer0.get().peerId == peerInfos[2].peerId # node2 has shard 0 + + ## When: We select a peer for shard 1 + let shard1Topic = some(PubsubTopic("/waku/2/rs/0/1")) + let selectedPeer1 = nodes[0].peerManager.selectPeer(WakuRelayCodec, shard1Topic) + + ## Then: Only peer with shard 1 is selected + check: + selectedPeer1.isSome() + selectedPeer1.get().peerId == peerInfos[1].peerId # node1 has shard 1 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "selectPeer() filters peers by shard using shards field": + ## 
Given: A peer manager with peers having shards in RemotePeerInfo (no ENR) + let + clusterId = 0.uint16 + shardId0 = 0.uint16 + shardId1 = 1.uint16 + + # Create peer manager + let pm = PeerManager.new( + switch = SwitchBuilder.new().withRng(rng()).withMplex().withNoise().build(), + storage = nil, + ) + + # Create peer infos with shards field populated (simulating metadata exchange) + let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D" + let peers = toSeq(1 .. 3) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)) + .filterIt(it.isOk()) + .mapIt(it.value) + require: + peers.len == 3 + + # Manually populate the shards field (ENR is not available) + var peerInfos: seq[RemotePeerInfo] = @[] + for i, peer in peers: + var peerInfo = RemotePeerInfo.init(peer.peerId, peer.addrs) + # Peer 0 and 2 have shard 0, peer 1 has shard 1 + peerInfo.shards = + if i == 1: + @[shardId1] + else: + @[shardId0] + # Note: ENR is intentionally left as none + peerInfos.add(peerInfo) + + # Add peers to peerstore + for peerInfo in peerInfos: + pm.switch.peerStore[AddressBook][peerInfo.peerId] = peerInfo.addrs + pm.switch.peerStore[ProtoBook][peerInfo.peerId] = @[WakuRelayCodec] + # simulate metadata exchange by setting shards field in peerstore + pm.switch.peerStore.setShardInfo(peerInfo.peerId, peerInfo.shards) + + ## When: We select a peer for shard 0 + let shard0Topic = some(PubsubTopic("/waku/2/rs/0/0")) + let selectedPeer0 = pm.selectPeer(WakuRelayCodec, shard0Topic) + + ## Then: Peers with shard 0 in shards field are selected + check: + selectedPeer0.isSome() + selectedPeer0.get().peerId in [peerInfos[0].peerId, peerInfos[2].peerId] + + ## When: We select a peer for shard 1 + let shard1Topic = some(PubsubTopic("/waku/2/rs/0/1")) + let selectedPeer1 = pm.selectPeer(WakuRelayCodec, shard1Topic) + + ## Then: Peer with shard 1 in shards field is selected + check: + selectedPeer1.isSome() + selectedPeer1.get().peerId == peerInfos[1].peerId + + 
asyncTest "selectPeer() handles invalid pubsub topic gracefully": + ## Given: A peer manager with valid peers + let node = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = 0, + subscribeShards = @[0'u16], + ) + await node.start() + + # Add a peer + let peer = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + await peer.start() + discard await peer.mountRelay() + + var peerInfo = peer.switch.peerInfo.toRemotePeerInfo() + peerInfo.enr = some(peer.enr) + node.peerManager.addPeer(peerInfo) + node.peerManager.switch.peerStore[ProtoBook][peerInfo.peerId] = @[WakuRelayCodec] + + ## When: selectPeer is called with malformed pubsub topic + let invalidTopics = + @[ + some(PubsubTopic("invalid-topic")), + some(PubsubTopic("/waku/2/invalid")), + some(PubsubTopic("/waku/2/rs/abc/0")), # non-numeric cluster + some(PubsubTopic("")), # empty topic + ] + + ## Then: Returns none(RemotePeerInfo) without crashing + for invalidTopic in invalidTopics: + let result = node.peerManager.selectPeer(WakuRelayCodec, invalidTopic) + check: + result.isNone() + + await allFutures(node.stop(), peer.stop()) + + asyncTest "selectPeer() prioritizes ENR over shards field": + ## Given: A peer with both ENR and shards field populated + let + clusterId = 0.uint16 + shardId0 = 0.uint16 + shardId1 = 1.uint16 + + let node = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[shardId0], + ) + await node.start() + discard await node.mountRelay() + + # Create peer with ENR containing shard 0 + let peer = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[shardId0], + ) + await peer.start() + discard await peer.mountRelay() + + # Create peer info with ENR (shard 0) but set shards field to shard 1 + var peerInfo = peer.switch.peerInfo.toRemotePeerInfo() + peerInfo.enr = 
some(peer.enr) # ENR has shard 0 + peerInfo.shards = @[shardId1] # shards field has shard 1 + + node.peerManager.addPeer(peerInfo) + node.peerManager.switch.peerStore[ProtoBook][peerInfo.peerId] = @[WakuRelayCodec] + # simulate metadata exchange by setting shards field in peerstore + node.peerManager.switch.peerStore.setShardInfo(peerInfo.peerId, peerInfo.shards) + + ## When: We select for shard 0 + let shard0Topic = some(PubsubTopic("/waku/2/rs/0/0")) + let selectedPeer = node.peerManager.selectPeer(WakuRelayCodec, shard0Topic) + + ## Then: Peer is selected because ENR (shard 0) takes precedence + check: + selectedPeer.isSome() + selectedPeer.get().peerId == peerInfo.peerId + + ## When: We select for shard 1 + let shard1Topic = some(PubsubTopic("/waku/2/rs/0/1")) + let selectedPeer1 = node.peerManager.selectPeer(WakuRelayCodec, shard1Topic) + + ## Then: Peer is still selected because shards field is checked as fallback + check: + selectedPeer1.isSome() + selectedPeer1.get().peerId == peerInfo.peerId + + await allFutures(node.stop(), peer.stop()) diff --git a/tests/test_waku.nim b/tests/test_waku.nim index b8e2b26b1..dabd65af7 100644 --- a/tests/test_waku.nim +++ b/tests/test_waku.nim @@ -3,49 +3,49 @@ import chronos, testutils/unittests, std/options import waku +import tools/confutils/cli_args suite "Waku API - Create node": asyncTest "Create node with minimal configuration": ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = Core + nodeConf.clusterId = 3'u16 + nodeConf.rest = false # This is the actual minimal config but as the node auto-start, it is not suitable for tests - # NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then check: not node.isNil() - 
node.conf.clusterId == 1 + node.conf.clusterId == 3 node.conf.relay == true asyncTest "Create node with full configuration": ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" - ], - staticStoreNodes = - @[ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - clusterId = 99, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), - messageValidation = - MessageValidation(maxMessageSize: "1024 KiB", rlnConfig: none(RlnConfig)), - ), - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = Core + nodeConf.clusterId = 99'u16 + nodeConf.rest = false + nodeConf.numShardsInNetwork = 16 + nodeConf.maxMessageSize = "1024 KiB" + nodeConf.entryNodes = + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ] + nodeConf.staticnodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ] ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then @@ -62,20 +62,19 @@ suite "Waku API - Create node": asyncTest "Create node with mixed entry nodes (enrtree, multiaddr)": ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = - @[ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - ], - clusterId = 42, - ), - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = 
Core + nodeConf.clusterId = 42'u16 + nodeConf.rest = false + nodeConf.entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + ] ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then diff --git a/tests/test_waku_keepalive.nim b/tests/test_waku_keepalive.nim index c12f20a05..5d8402268 100644 --- a/tests/test_waku_keepalive.nim +++ b/tests/test_waku_keepalive.nim @@ -44,8 +44,7 @@ suite "Waku Keepalive": await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) - let healthMonitor = NodeHealthMonitor() - healthMonitor.setNodeToHealthMonitor(node1) + let healthMonitor = NodeHealthMonitor.new(node1) healthMonitor.startKeepalive(2.seconds).isOkOr: assert false, "Failed to start keepalive" diff --git a/tests/test_waku_metadata.nim b/tests/test_waku_metadata.nim index b30fd1712..cfceb89b5 100644 --- a/tests/test_waku_metadata.nim +++ b/tests/test_waku_metadata.nim @@ -13,14 +13,15 @@ import eth/keys, eth/p2p/discoveryv5/enr import - waku/ - [ - waku_node, - waku_core/topics, - node/peer_manager, - discovery/waku_discv5, - waku_metadata, - ], + waku/[ + waku_node, + waku_core/topics, + waku_core, + node/peer_manager, + discovery/waku_discv5, + waku_metadata, + waku_relay/protocol, + ], ./testlib/wakucore, ./testlib/wakunode @@ -41,26 +42,86 @@ procSuite "Waku Metadata Protocol": clusterId = clusterId, ) + # Mount metadata protocol on both nodes before starting + discard node1.mountMetadata(clusterId, @[]) + discard node2.mountMetadata(clusterId, @[]) + + # Mount relay so metadata can track subscriptions + discard await node1.mountRelay() + discard await node2.mountRelay() + # Start nodes await allFutures([node1.start(), node2.start()]) - node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/7")) - 
node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/6")) + # Subscribe to topics on node1 - relay will track these and metadata will report them + let noOpHandler: WakuRelayHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async.} = + discard + + node1.wakuRelay.subscribe("/waku/2/rs/10/7", noOpHandler) + node1.wakuRelay.subscribe("/waku/2/rs/10/6", noOpHandler) # Create connection let connOpt = await node2.peerManager.dialPeer( node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec ) require: - connOpt.isSome + connOpt.isSome() # Request metadata let response1 = await node2.wakuMetadata.request(connOpt.get()) # Check the response or dont even continue require: - response1.isOk + response1.isOk() check: response1.get().clusterId.get() == clusterId response1.get().shards == @[uint32(6), uint32(7)] + + await allFutures([node1.stop(), node2.stop()]) + + asyncTest "Metadata reports configured shards before relay subscription": + ## Given: Node with configured shards but no relay subscriptions yet + let + clusterId = 10.uint16 + configuredShards = @[uint16(0), uint16(1)] + + let node1 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = configuredShards, + ) + let node2 = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), clusterId = clusterId + ) + + # Mount metadata with configured shards on node1 + discard node1.mountMetadata(clusterId, configuredShards) + # Mount metadata on node2 so it can make requests + discard node2.mountMetadata(clusterId, @[]) + + # Start nodes (relay is NOT mounted yet on node1) + await allFutures([node1.start(), node2.start()]) + + ## When: Node2 requests metadata from Node1 before relay is active + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec + ) + require: + connOpt.isSome + + let response = await 
node2.wakuMetadata.request(connOpt.get()) + + ## Then: Response contains configured shards even without relay subscriptions + require: + response.isOk() + + check: + response.get().clusterId.get() == clusterId + response.get().shards == @[uint32(0), uint32(1)] + + await allFutures([node1.stop(), node2.stop()]) diff --git a/tests/test_waku_rendezvous.nim b/tests/test_waku_rendezvous.nim index fa2efbd47..07113ca4a 100644 --- a/tests/test_waku_rendezvous.nim +++ b/tests/test_waku_rendezvous.nim @@ -1,12 +1,21 @@ {.used.} -import std/options, chronos, testutils/unittests, libp2p/builders +import + std/options, + chronos, + testutils/unittests, + libp2p/builders, + libp2p/protocols/rendezvous import waku/waku_core/peers, + waku/waku_core/codecs, + waku/waku_core, waku/node/waku_node, waku/node/peer_manager/peer_manager, waku/waku_rendezvous/protocol, + waku/waku_rendezvous/common, + waku/waku_rendezvous/waku_peer_record, ./testlib/[wakucore, wakunode] procSuite "Waku Rendezvous": @@ -50,18 +59,88 @@ procSuite "Waku Rendezvous": node2.peerManager.addPeer(peerInfo3) node3.peerManager.addPeer(peerInfo2) - let namespace = "test/name/space" - - let res = await node1.wakuRendezvous.batchAdvertise( - namespace, 60.seconds, @[peerInfo2.peerId] - ) + let res = await node1.wakuRendezvous.advertiseAll() assert res.isOk(), $res.error + # Rendezvous Request API requires dialing first + let connOpt = + await node3.peerManager.dialPeer(peerInfo2.peerId, WakuRendezVousCodec) + require: + connOpt.isSome - let response = - await node3.wakuRendezvous.batchRequest(namespace, 1, @[peerInfo2.peerId]) - assert response.isOk(), $response.error - let records = response.get() + var records: seq[WakuPeerRecord] + try: + records = await rendezvous.request[WakuPeerRecord]( + node3.wakuRendezvous, + Opt.some(computeMixNamespace(clusterId)), + Opt.some(1), + Opt.some(@[peerInfo2.peerId]), + ) + except CatchableError as e: + assert false, "Request failed with exception: " & e.msg check: records.len == 
1 records[0].peerId == peerInfo1.peerId + #records[0].mixPubKey == $node1.wakuMix.pubKey + + asyncTest "Rendezvous advertises configured shards before relay is active": + ## Given: A node with configured shards but no relay subscriptions yet + let + clusterId = 10.uint16 + configuredShards = @[RelayShard(clusterId: clusterId, shardId: 0)] + + let node = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + subscribeShards = @[0'u16], + ) + + ## When: Node mounts rendezvous with configured shards (before relay) + await node.mountRendezvous(clusterId, configuredShards) + await node.start() + + ## Then: The rendezvous protocol should be mounted successfully + check: + node.wakuRendezvous != nil + + # Verify that the protocol is running without errors + # (shards are used internally by the getShardsGetter closure) + let namespace = computeMixNamespace(clusterId) + check: + namespace.len > 0 + + await node.stop() + + asyncTest "Rendezvous uses configured shards when relay not mounted": + ## Given: A light client node with no relay protocol + let + clusterId = 10.uint16 + configuredShards = + @[ + RelayShard(clusterId: clusterId, shardId: 0), + RelayShard(clusterId: clusterId, shardId: 1), + ] + + let lightClient = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), clusterId = clusterId + ) + + ## When: Node mounts rendezvous with configured shards (no relay mounted) + await lightClient.mountRendezvous(clusterId, configuredShards) + await lightClient.start() + + ## Then: Rendezvous should be mounted successfully without relay + check: + lightClient.wakuRendezvous != nil + lightClient.wakuRelay == nil # Verify relay is not mounted + + # Verify the protocol is working (doesn't fail immediately) + # advertiseAll requires peers,so we just check the protocol is initialized + await sleepAsync(100.milliseconds) + + check: + lightClient.wakuRendezvous != nil + + await lightClient.stop() diff 
--git a/tests/testlib/wakunode.nim b/tests/testlib/wakunode.nim index ef6ba2b24..f59546ec8 100644 --- a/tests/testlib/wakunode.nim +++ b/tests/testlib/wakunode.nim @@ -27,15 +27,15 @@ import # TODO: migrate to usage of a test cluster conf proc defaultTestWakuConfBuilder*(): WakuConfBuilder = var builder = WakuConfBuilder.init() - builder.withP2pTcpPort(Port(60000)) + builder.withP2pTcpPort(Port(0)) builder.withP2pListenAddress(parseIpAddress("0.0.0.0")) builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1")) builder.withDnsAddrsNameServers( @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] ) builder.withNatStrategy("any") - builder.withMaxConnections(50) - builder.withRelayServiceRatio("60:40") + builder.withMaxConnections(150) + builder.withRelayServiceRatio("50:50") builder.withMaxMessageSize("1024 KiB") builder.withClusterId(DefaultClusterId) builder.withSubscribeShards(@[DefaultShardId]) @@ -80,7 +80,7 @@ proc newTestWakuNode*( # Update extPort to default value if it's missing and there's an extIp or a DNS domain let extPort = if (extIp.isSome() or dns4DomainName.isSome()) and extPort.isNone(): - some(Port(60000)) + some(Port(0)) else: extPort diff --git a/tests/waku_core/test_message_digest.nim b/tests/waku_core/test_message_digest.nim index 1d1f71225..22a10d84d 100644 --- a/tests/waku_core/test_message_digest.nim +++ b/tests/waku_core/test_message_digest.nim @@ -35,7 +35,7 @@ suite "Waku Message - Deterministic hashing": byteutils.toHex(message.payload) == "010203045445535405060708" byteutils.toHex(message.meta) == "" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == + byteutils.toHex(messageHash) == "cccab07fed94181c83937c8ca8340c9108492b7ede354a6d95421ad34141fd37" test "digest computation - meta field (12 bytes)": @@ -69,7 +69,7 @@ suite "Waku Message - Deterministic hashing": byteutils.toHex(message.payload) == "010203045445535405060708" byteutils.toHex(message.meta) == 
"73757065722d736563726574" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == + byteutils.toHex(messageHash) == "b9b4852f9d8c489846e8bfc6c5ca6a1a8d460a40d28832a966e029eb39619199" test "digest computation - meta field (64 bytes)": @@ -104,7 +104,7 @@ suite "Waku Message - Deterministic hashing": byteutils.toHex(message.meta) == "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" - messageHash.toHex() == + byteutils.toHex(messageHash) == "653460d04f66c5b11814d235152f4f246e6f03ef80a305a825913636fbafd0ba" test "digest computation - zero length payload": @@ -132,7 +132,7 @@ suite "Waku Message - Deterministic hashing": ## Then check: - messageHash.toHex() == + byteutils.toHex(messageHash) == "0f6448cc23b2db6c696aa6ab4b693eff4cf3549ff346fe1dbeb281697396a09f" test "waku message - check meta size is enforced": diff --git a/tests/waku_discv5/test_waku_discv5.nim b/tests/waku_discv5/test_waku_discv5.nim index 6685bda32..20a0c6965 100644 --- a/tests/waku_discv5/test_waku_discv5.nim +++ b/tests/waku_discv5/test_waku_discv5.nim @@ -426,7 +426,6 @@ suite "Waku Discovery v5": confBuilder.withNodeKey(libp2p_keys.PrivateKey.random(Secp256k1, myRng[])[]) confBuilder.discv5Conf.withEnabled(true) confBuilder.discv5Conf.withUdpPort(9000.Port) - let conf = confBuilder.build().valueOr: raiseAssert error @@ -468,6 +467,9 @@ suite "Waku Discovery v5": # leave some time for discv5 to act await sleepAsync(chronos.seconds(10)) + # Connect peers via peer manager to ensure identify happens + discard await waku0.node.peerManager.connectPeer(waku1.node.switch.peerInfo) + var r = waku0.node.peerManager.selectPeer(WakuPeerExchangeCodec) assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" @@ -480,7 +482,7 @@ suite "Waku Discovery v5": r = 
waku2.node.peerManager.selectPeer(WakuPeerExchangeCodec) assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" - r = waku2.node.peerManager.selectPeer(RendezVousCodec) + r = waku2.node.peerManager.selectPeer(WakuRendezVousCodec) assert r.isSome(), "could not retrieve peer mounting RendezVousCodec" asyncTest "Discv5 bootstrap nodes should be added to the peer store": diff --git a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim index 7c8c640ba..be92fc409 100644 --- a/tests/waku_filter_v2/test_waku_filter_dos_protection.nim +++ b/tests/waku_filter_v2/test_waku_filter_dos_protection.nim @@ -122,24 +122,51 @@ suite "Waku Filter - DOS protection": check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) - await sleepAsync(20.milliseconds) - check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == - none(FilterSubscribeErrorKind) + # Avoid using tiny sleeps to control refill behavior: CI scheduling can + # oversleep and mint additional tokens. Instead, issue a small burst of + # subscribe requests and require at least one TOO_MANY_REQUESTS. + var c1SubscribeFutures = newSeq[Future[FilterSubscribeResult]]() + for i in 0 ..< 6: + c1SubscribeFutures.add( + client1.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + ) + + let c1Finished = await allFinished(c1SubscribeFutures) + var c1GotTooMany = false + for fut in c1Finished: + check not fut.failed() + let res = fut.read() + if res.isErr() and res.error().kind == FilterSubscribeErrorKind.TOO_MANY_REQUESTS: + c1GotTooMany = true + break + check c1GotTooMany + + # Ensure the other client is not affected by client1's rate limit. 
check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) - await sleepAsync(20.milliseconds) - check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == - none(FilterSubscribeErrorKind) - await sleepAsync(20.milliseconds) - check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == - some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) - check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == - none(FilterSubscribeErrorKind) - check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == - some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) + + var c2SubscribeFutures = newSeq[Future[FilterSubscribeResult]]() + for i in 0 ..< 6: + c2SubscribeFutures.add( + client2.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + ) + + let c2Finished = await allFinished(c2SubscribeFutures) + var c2GotTooMany = false + for fut in c2Finished: + check not fut.failed() + let res = fut.read() + if res.isErr() and res.error().kind == FilterSubscribeErrorKind.TOO_MANY_REQUESTS: + c2GotTooMany = true + break + check c2GotTooMany # ensure period of time has passed and clients can again use the service - await sleepAsync(1000.milliseconds) + await sleepAsync(1100.milliseconds) check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == @@ -147,29 +174,55 @@ suite "Waku Filter - DOS protection": asyncTest "Ensure normal usage allowed": # Given + # Rate limit setting is (3 requests / 1000ms) per peer. 
+ # In a token-bucket model this means: + # - capacity = 3 tokens + # - refill rate = 3 tokens / second => ~1 token every ~333ms + # - each request consumes 1 token (including UNSUBSCRIBE) check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) - await sleepAsync(500.milliseconds) - check client1.ping(serverRemotePeerInfo) == none(FilterSubscribeErrorKind) - check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) + # Expected remaining tokens (approx): 2 await sleepAsync(500.milliseconds) check client1.ping(serverRemotePeerInfo) == none(FilterSubscribeErrorKind) check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) - await sleepAsync(50.milliseconds) + # After ~500ms, ~1 token refilled; PING consumes 1 => expected remaining: 2 + + await sleepAsync(500.milliseconds) + check client1.ping(serverRemotePeerInfo) == none(FilterSubscribeErrorKind) + check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) + + # After another ~500ms, ~1 token refilled; PING consumes 1 => expected remaining: 2 + check client1.unsubscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) == false - await sleepAsync(50.milliseconds) check client1.ping(serverRemotePeerInfo) == some(FilterSubscribeErrorKind.NOT_FOUND) - check client1.ping(serverRemotePeerInfo) == some(FilterSubscribeErrorKind.NOT_FOUND) - await sleepAsync(50.milliseconds) - check client1.ping(serverRemotePeerInfo) == - some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) + # After unsubscribing, PING is expected to return NOT_FOUND while still + # counting towards the rate limit. + + # CI can oversleep / schedule slowly, which can mint extra tokens between + # requests. To make the test robust, issue a small burst of pings and + # require at least one TOO_MANY_REQUESTS response. 
+ var pingFutures = newSeq[Future[FilterSubscribeResult]]() + for i in 0 ..< 9: + pingFutures.add(client1.wakuFilterClient.ping(serverRemotePeerInfo)) + + let finished = await allFinished(pingFutures) + var gotTooMany = false + for fut in finished: + check not fut.failed() + let pingRes = fut.read() + if pingRes.isErr() and + pingRes.error().kind == FilterSubscribeErrorKind.TOO_MANY_REQUESTS: + gotTooMany = true + break + + check gotTooMany check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == none(FilterSubscribeErrorKind) diff --git a/tests/waku_lightpush/test_client.nim b/tests/waku_lightpush/test_client.nim index af22ffa5d..0bc9afdd4 100644 --- a/tests/waku_lightpush/test_client.nim +++ b/tests/waku_lightpush/test_client.nim @@ -38,7 +38,7 @@ suite "Waku Lightpush Client": asyncSetup: handlerFuture = newPushHandlerFuture() handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = let msgLen = message.encode().buffer.len if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: @@ -287,7 +287,7 @@ suite "Waku Lightpush Client": handlerError = "handler-error" handlerFuture2 = newFuture[void]() handler2 = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = handlerFuture2.complete() return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError) diff --git a/tests/waku_lightpush/test_ratelimit.nim b/tests/waku_lightpush/test_ratelimit.nim index b2dcdc7b5..e023bf3f5 100644 --- a/tests/waku_lightpush/test_ratelimit.nim +++ b/tests/waku_lightpush/test_ratelimit.nim @@ -19,7 +19,7 @@ suite "Rate limited push service": ## Given var handlerFuture = newFuture[(string, WakuMessage)]() let handler: PushMessageHandler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, 
message: WakuMessage ): Future[WakuLightPushResult] {.async.} = handlerFuture.complete((pubsubTopic, message)) return lightpushSuccessResult(1) # succeed to publish to 1 peer. @@ -37,7 +37,7 @@ suite "Rate limited push service": handlerFuture = newFuture[(string, WakuMessage)]() let requestRes = - await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) + await client.publish(some(DefaultPubsubTopic), message, serverPeerId) check await handlerFuture.withTimeout(50.millis) @@ -66,7 +66,7 @@ suite "Rate limited push service": var endTime = Moment.now() var elapsed: Duration = (endTime - startTime) await sleepAsync(tokenPeriod - elapsed + firstWaitExtend) - firstWaitEXtend = 100.millis + firstWaitExtend = 100.millis ## Cleanup await allFutures(clientSwitch.stop(), serverSwitch.stop()) @@ -80,11 +80,12 @@ suite "Rate limited push service": await allFutures(serverSwitch.start(), clientSwitch.start()) ## Given - var handlerFuture = newFuture[(string, WakuMessage)]() + # Don't rely on per-request timing assumptions or a single shared Future. + # CI can be slow enough that sequential requests accidentally refill tokens. + # Instead we issue a small burst and assert we observe at least one rejection. let handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = - handlerFuture.complete((pubsubTopic, message)) return lightpushSuccessResult(1) let @@ -93,45 +94,37 @@ suite "Rate limited push service": client = newTestWakuLightpushClient(clientSwitch) let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() - let topic = DefaultPubsubTopic + let tokenPeriod = 500.millis - let successProc = proc(): Future[void] {.async.} = + # Fire a burst of requests; require at least one success and one rejection. 
+ var publishFutures = newSeq[Future[WakuLightPushResult]]() + for i in 0 ..< 10: let message = fakeWakuMessage() - handlerFuture = newFuture[(string, WakuMessage)]() - let requestRes = - await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) - discard await handlerFuture.withTimeout(10.millis) + publishFutures.add( + client.publish(some(DefaultPubsubTopic), message, serverPeerId) + ) - check: - requestRes.isOk() - handlerFuture.finished() - let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() - check: - handledMessagePubsubTopic == DefaultPubsubTopic - handledMessage == message + let finished = await allFinished(publishFutures) + var gotOk = false + var gotTooMany = false + for fut in finished: + check not fut.failed() + let res = fut.read() + if res.isOk(): + gotOk = true + else: + check res.error.code == LightPushErrorCode.TOO_MANY_REQUESTS + check res.error.desc == some(TooManyRequestsMessage) + gotTooMany = true - let rejectProc = proc(): Future[void] {.async.} = - let message = fakeWakuMessage() - handlerFuture = newFuture[(string, WakuMessage)]() - let requestRes = - await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) - discard await handlerFuture.withTimeout(10.millis) + check gotOk + check gotTooMany - check: - requestRes.isErr() - requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS - requestRes.error.desc == some(TooManyRequestsMessage) - - for testCnt in 0 .. 
2: - await successProc() - await sleepAsync(20.millis) - - await rejectProc() - - await sleepAsync(500.millis) - - ## next one shall succeed due to the rate limit time window has passed - await successProc() + # ensure period of time has passed and the client can again use the service + await sleepAsync(tokenPeriod + 100.millis) + let recoveryRes = + await client.publish(some(DefaultPubsubTopic), fakeWakuMessage(), serverPeerId) + check recoveryRes.isOk() ## Cleanup await allFutures(clientSwitch.stop(), serverSwitch.stop()) diff --git a/tests/waku_lightpush_legacy/test_client.nim b/tests/waku_lightpush_legacy/test_client.nim index 1dcb466c9..3d3027e9c 100644 --- a/tests/waku_lightpush_legacy/test_client.nim +++ b/tests/waku_lightpush_legacy/test_client.nim @@ -35,7 +35,7 @@ suite "Waku Legacy Lightpush Client": asyncSetup: handlerFuture = newPushHandlerFuture() handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = let msgLen = message.encode().buffer.len if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: @@ -282,7 +282,7 @@ suite "Waku Legacy Lightpush Client": handlerError = "handler-error" handlerFuture2 = newFuture[void]() handler2 = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = handlerFuture2.complete() return err(handlerError) diff --git a/tests/waku_lightpush_legacy/test_ratelimit.nim b/tests/waku_lightpush_legacy/test_ratelimit.nim index 3df8d369d..ae5f5ed28 100644 --- a/tests/waku_lightpush_legacy/test_ratelimit.nim +++ b/tests/waku_lightpush_legacy/test_ratelimit.nim @@ -25,7 +25,7 @@ suite "Rate limited push service": ## Given var handlerFuture = newFuture[(string, WakuMessage)]() let handler: PushMessageHandler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, 
message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = handlerFuture.complete((pubsubTopic, message)) return ok() @@ -86,58 +86,52 @@ suite "Rate limited push service": await allFutures(serverSwitch.start(), clientSwitch.start()) ## Given - var handlerFuture = newFuture[(string, WakuMessage)]() let handler = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = - handlerFuture.complete((pubsubTopic, message)) return ok() let + tokenPeriod = 500.millis server = await newTestWakuLegacyLightpushNode( - serverSwitch, handler, some((3, 500.millis)) + serverSwitch, handler, some((3, tokenPeriod)) ) client = newTestWakuLegacyLightpushClient(clientSwitch) let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() - let topic = DefaultPubsubTopic - let successProc = proc(): Future[void] {.async.} = - let message = fakeWakuMessage() - handlerFuture = newFuture[(string, WakuMessage)]() - let requestRes = - await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) - discard await handlerFuture.withTimeout(10.millis) + # Avoid assuming the exact Nth request will be rejected. With Chronos TokenBucket + # minting semantics and real network latency, CI timing can allow refills. + # Instead, send a short burst and require that we observe at least one rejection. 
+ let burstSize = 10 + var publishFutures: seq[Future[WakuLightPushResult[string]]] = @[] + for _ in 0 ..< burstSize: + publishFutures.add( + client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId) + ) - check: - requestRes.isOk() - handlerFuture.finished() - let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() - check: - handledMessagePubsubTopic == DefaultPubsubTopic - handledMessage == message + let finished = await allFinished(publishFutures) + var gotOk = false + var gotTooMany = false + for fut in finished: + check not fut.failed() + let res = fut.read() + if res.isOk(): + gotOk = true + elif res.error == "TOO_MANY_REQUESTS": + gotTooMany = true - let rejectProc = proc(): Future[void] {.async.} = - let message = fakeWakuMessage() - handlerFuture = newFuture[(string, WakuMessage)]() - let requestRes = - await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) - discard await handlerFuture.withTimeout(10.millis) + check: + gotOk + gotTooMany - check: - requestRes.isErr() - requestRes.error == "TOO_MANY_REQUESTS" - - for testCnt in 0 .. 
2: - await successProc() - await sleepAsync(20.millis) - - await rejectProc() - - await sleepAsync(500.millis) + await sleepAsync(tokenPeriod + 100.millis) ## next one shall succeed due to the rate limit time window has passed - await successProc() + let afterCooldownRes = + await client.publish(DefaultPubsubTopic, fakeWakuMessage(), peer = serverPeerId) + check: + afterCooldownRes.isOk() ## Cleanup await allFutures(clientSwitch.stop(), serverSwitch.stop()) diff --git a/tests/waku_relay/test_wakunode_relay.nim b/tests/waku_relay/test_wakunode_relay.nim index 2b4f32617..a687119bd 100644 --- a/tests/waku_relay/test_wakunode_relay.nim +++ b/tests/waku_relay/test_wakunode_relay.nim @@ -1,7 +1,7 @@ {.used.} import - std/[os, sequtils, sysrand, math], + std/[os, strutils, sequtils, sysrand, math], stew/byteutils, testutils/unittests, chronos, @@ -450,7 +450,8 @@ suite "WakuNode - Relay": await sleepAsync(500.millis) let res = await node2.publish(some($shard), message) - assert res.isOk(), $res.error + check res.isErr() + check contains($res.error, "NoPeersToPublish") await sleepAsync(500.millis) diff --git a/tests/waku_relay/utils.nim b/tests/waku_relay/utils.nim index d5703d415..4e958a4ea 100644 --- a/tests/waku_relay/utils.nim +++ b/tests/waku_relay/utils.nim @@ -11,15 +11,15 @@ import from std/times import epochTime import - waku/ - [ - waku_relay, - node/waku_node, - node/peer_manager, - waku_core, - waku_node, - waku_rln_relay, - ], + waku/[ + waku_relay, + node/waku_node, + node/peer_manager, + waku_core, + waku_node, + waku_rln_relay, + common/broker/broker_context, + ], ../waku_store/store_utils, ../waku_archive/archive_utils, ../testlib/[wakucore, futures] diff --git a/tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz b/tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz new file mode 100644 index 000000000..b5fdebb74 Binary files /dev/null and 
b/tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz differ diff --git a/tests/waku_rln_relay/test_rln_contract_deployment.nim b/tests/waku_rln_relay/test_rln_contract_deployment.nim new file mode 100644 index 000000000..5a9624ce8 --- /dev/null +++ b/tests/waku_rln_relay/test_rln_contract_deployment.nim @@ -0,0 +1,29 @@ +{.used.} + +{.push raises: [].} + +import std/[options, os], results, testutils/unittests, chronos, web3 + +import + waku/[ + waku_rln_relay, + waku_rln_relay/conversion_utils, + waku_rln_relay/group_manager/on_chain/group_manager, + ], + ./utils_onchain + +suite "Token and RLN Contract Deployment": + test "anvil should dump state to file on exit": + # git will ignore this file, if the contract has been updated and the state file needs to be regenerated then this file can be renamed to replace the one in the repo (tests/waku_rln_relay/anvil_state/tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json) + let testStateFile = some("tests/waku_rln_relay/anvil_state/anvil_state.ignore.json") + let anvilProc = runAnvil(stateFile = testStateFile, dumpStateOnExit = true) + let manager = waitFor setupOnchainGroupManager(deployContracts = true) + + stopAnvil(anvilProc) + + check: + fileExists(testStateFile.get()) + + #The test should still pass even if thie compression fails + compressGzipFile(testStateFile.get(), testStateFile.get() & ".gz").isOkOr: + error "Failed to compress state file", error = error diff --git a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim index cf697961a..aac900911 100644 --- a/tests/waku_rln_relay/test_rln_group_manager_onchain.nim +++ b/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -33,8 +33,8 @@ suite "Onchain group manager": var manager {.threadVar.}: OnchainGroupManager setup: - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = 
some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) teardown: stopAnvil(anvilProc) diff --git a/tests/waku_rln_relay/test_waku_rln_relay.nim b/tests/waku_rln_relay/test_waku_rln_relay.nim index 0bbb448e1..d9fe0d890 100644 --- a/tests/waku_rln_relay/test_waku_rln_relay.nim +++ b/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -15,6 +15,7 @@ import waku_rln_relay/rln, waku_rln_relay/protocol_metrics, waku_keystore, + common/broker/broker_context, ], ./rln/waku_rln_relay_utils, ./utils_onchain, @@ -27,8 +28,8 @@ suite "Waku rln relay": var manager {.threadVar.}: OnchainGroupManager setup: - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) teardown: stopAnvil(anvilProc) @@ -70,53 +71,6 @@ suite "Waku rln relay": info "the generated identity credential: ", idCredential - test "hash Nim Wrappers": - # create an RLN instance - let rlnInstance = createRLNInstanceWrapper() - require: - rlnInstance.isOk() - - # prepare the input - let - msg = "Hello".toBytes() - hashInput = encodeLengthPrefix(msg) - hashInputBuffer = toBuffer(hashInput) - - # prepare other inputs to the hash function - let outputBuffer = default(Buffer) - - let hashSuccess = sha256(unsafeAddr hashInputBuffer, unsafeAddr outputBuffer, true) - require: - hashSuccess - let outputArr = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] - - check: - "1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" == - outputArr.inHex() - - let - hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] - hashOutputHex = hashOutput.toHex() - - info "hash output", hashOutputHex - - test "sha256 hash utils": - # create an RLN instance - let rlnInstance = createRLNInstanceWrapper() - require: - rlnInstance.isOk() - let rln = rlnInstance.get() - - # prepare the input - let msg = "Hello".toBytes() - - let hashRes 
= sha256(msg) - - check: - hashRes.isOk() - "1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" == - hashRes.get().inHex() - test "poseidon hash utils": # create an RLN instance let rlnInstance = createRLNInstanceWrapper() @@ -280,8 +234,10 @@ suite "Waku rln relay": let index = MembershipIndex(5) let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index) - let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: - raiseAssert $error + var wakuRlnRelay: WakuRlnRelay + lockNewGlobalBrokerContext: + wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) let idCredentials = generateCredentials() @@ -337,8 +293,10 @@ suite "Waku rln relay": let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index) - let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: - raiseAssert $error + var wakuRlnRelay: WakuRlnRelay + lockNewGlobalBrokerContext: + wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) let idCredentials = generateCredentials() @@ -387,8 +345,10 @@ suite "Waku rln relay": asyncTest "multiple senders with same external nullifier": let index1 = MembershipIndex(5) let rlnConf1 = getWakuRlnConfig(manager = manager, index = index1) - let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr: - raiseAssert "failed to create waku rln relay: " & $error + var wakuRlnRelay1: WakuRlnRelay + lockNewGlobalBrokerContext: + wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr: + raiseAssert "failed to create waku rln relay: " & $error let manager1 = cast[OnchainGroupManager](wakuRlnRelay1.groupManager) let idCredentials1 = generateCredentials() @@ -401,8 +361,10 @@ suite "Waku rln relay": let index2 = MembershipIndex(6) let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2) - let wakuRlnRelay2 = (await 
WakuRlnRelay.new(rlnConf2)).valueOr: - raiseAssert "failed to create waku rln relay: " & $error + var wakuRlnRelay2: WakuRlnRelay + lockNewGlobalBrokerContext: + wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr: + raiseAssert "failed to create waku rln relay: " & $error let manager2 = cast[OnchainGroupManager](wakuRlnRelay2.groupManager) let idCredentials2 = generateCredentials() @@ -533,9 +495,10 @@ suite "Waku rln relay": let wakuRlnConfig = getWakuRlnConfig( manager = manager, index = index, epochSizeSec = rlnEpochSizeSec.uint64 ) - - let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: - raiseAssert $error + var wakuRlnRelay: WakuRlnRelay + lockNewGlobalBrokerContext: + wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error let rlnMaxEpochGap = wakuRlnRelay.rlnMaxEpochGap let testProofMetadata = default(ProofMetadata) diff --git a/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/tests/waku_rln_relay/test_wakunode_rln_relay.nim index 7308ae257..fcf97a671 100644 --- a/tests/waku_rln_relay/test_wakunode_rln_relay.nim +++ b/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -12,7 +12,8 @@ import waku/[waku_core, waku_node, waku_rln_relay], ../testlib/[wakucore, futures, wakunode, testutils], ./utils_onchain, - ./rln/waku_rln_relay_utils + ./rln/waku_rln_relay_utils, + waku/common/broker/broker_context from std/times import epochTime @@ -30,75 +31,77 @@ procSuite "WakuNode - RLN relay": var manager {.threadVar.}: OnchainGroupManager setup: - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) teardown: stopAnvil(anvilProc) asyncTest "testing rln-relay with valid proof": - let - # publisher node - nodeKey1 = generateSecp256k1Key() + var node1, node2, node3: WakuNode # publisher node + let contentTopic = ContentTopic("/waku/2/default-content/proto") + # set 
up three nodes + lockNewGlobalBrokerContext: + let nodeKey1 = generateSecp256k1Key() node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials() + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + info "Updated root for node1", rootUpdated1 + + lockNewGlobalBrokerContext: # Relay node - nodeKey2 = generateSecp256k1Key() + let nodeKey2 = generateSecp256k1Key() node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + info "Updated root for node2", rootUpdated2 + + lockNewGlobalBrokerContext: # Subscriber - nodeKey3 = generateSecp256k1Key() + let nodeKey3 = generateSecp256k1Key() node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) - contentTopic = ContentTopic("/waku/2/default-content/proto") + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" - # set up three nodes - # node1 - (await node1.mountRelay()).isOkOr: - assert false, "Failed to mount relay" 
+ let wakuRlnConfig3 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - # mount rlnrelay in off-chain mode - let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() - await node1.mountRlnRelay(wakuRlnConfig1) - await node1.start() - - # Registration is mandatory before sending messages with rln-relay - let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) - let idCredentials1 = generateCredentials() - - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() - - let rootUpdated1 = waitFor manager1.updateRoots() - info "Updated root for node1", rootUpdated1 - - # node 2 - (await node2.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - # mount rlnrelay in off-chain mode - let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) - - await node2.mountRlnRelay(wakuRlnConfig2) - await node2.start() - - let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) - let rootUpdated2 = waitFor manager2.updateRoots() - info "Updated root for node2", rootUpdated2 - - # node 3 - (await node3.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - - let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - - await node3.mountRlnRelay(wakuRlnConfig3) - await node3.start() - - let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) - let rootUpdated3 = waitFor manager3.updateRoots() - info "Updated root for node3", rootUpdated3 + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + info "Updated root for node3", rootUpdated3 # connect them together await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -156,10 
+159,67 @@ procSuite "WakuNode - RLN relay": asyncTest "testing rln-relay is applied in all rln shards/content topics": # create 3 nodes - let nodes = toSeq(0 ..< 3).mapIt( - newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - ) - await allFutures(nodes.mapIt(it.start())) + var node1, node2, node3: WakuNode + lockNewGlobalBrokerContext: + let nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig1 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials() + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + info "Updated root for node", node = 1, rootUpdated = rootUpdated1 + lockNewGlobalBrokerContext: + let nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig2 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let idCredentials2 = generateCredentials() + + try: + waitFor manager2.register(idCredentials2, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated2 = waitFor manager2.updateRoots() + info "Updated root for node", node = 2, rootUpdated = rootUpdated2 + lockNewGlobalBrokerContext: 
+ let nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig3 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let idCredentials3 = generateCredentials() + + try: + waitFor manager3.register(idCredentials3, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated3 = waitFor manager3.updateRoots() + info "Updated root for node", node = 3, rootUpdated = rootUpdated3 let shards = @[RelayShard(clusterId: 0, shardId: 0), RelayShard(clusterId: 0, shardId: 1)] @@ -169,31 +229,9 @@ procSuite "WakuNode - RLN relay": ContentTopic("/waku/2/content-topic-b/proto"), ] - # set up three nodes - await allFutures(nodes.mapIt(it.mountRelay())) - - # mount rlnrelay in off-chain mode - for index, node in nodes: - let wakuRlnConfig = - getWakuRlnConfig(manager = manager, index = MembershipIndex(index + 1)) - - await node.mountRlnRelay(wakuRlnConfig) - await node.start() - let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) - let idCredentials = generateCredentials() - - try: - waitFor manager.register(idCredentials, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() - - let rootUpdated = waitFor manager.updateRoots() - info "Updated root for node", node = index + 1, rootUpdated = rootUpdated - # connect them together - await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()]) - await nodes[2].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()]) + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await 
node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) var rxMessagesTopic1 = 0 var rxMessagesTopic2 = 0 @@ -211,15 +249,15 @@ procSuite "WakuNode - RLN relay": ): Future[void] {.async, gcsafe.} = await sleepAsync(0.milliseconds) - nodes[0].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: - assert false, "Failed to subscribe to pubsub topic in nodes[0]: " & $error - nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: - assert false, "Failed to subscribe to pubsub topic in nodes[1]: " & $error + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error # mount the relay handlers - nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr: + node3.subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error - nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr: + node3.subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr: assert false, "Failed to subscribe to pubsub topic: " & $error await sleepAsync(1000.millis) @@ -236,8 +274,8 @@ procSuite "WakuNode - RLN relay": contentTopic: contentTopics[0], ) - nodes[0].wakuRlnRelay.unsafeAppendRLNProof( - message, nodes[0].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) + node1.wakuRlnRelay.unsafeAppendRLNProof( + message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) ).isOkOr: raiseAssert $error messages1.add(message) @@ -249,8 +287,8 @@ procSuite "WakuNode - RLN relay": contentTopic: contentTopics[1], ) - nodes[1].wakuRlnRelay.unsafeAppendRLNProof( - message, nodes[1].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) + 
node2.wakuRlnRelay.unsafeAppendRLNProof( + message, node2.wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) ).isOkOr: raiseAssert $error messages2.add(message) @@ -258,9 +296,9 @@ procSuite "WakuNode - RLN relay": # publish 3 messages from node[0] (last 2 are spam, window is 10 secs) # publish 3 messages from node[1] (last 2 are spam, window is 10 secs) for msg in messages1: - discard await nodes[0].publish(some($shards[0]), msg) + discard await node1.publish(some($shards[0]), msg) for msg in messages2: - discard await nodes[1].publish(some($shards[1]), msg) + discard await node2.publish(some($shards[1]), msg) # wait for gossip to propagate await sleepAsync(5000.millis) @@ -271,70 +309,70 @@ procSuite "WakuNode - RLN relay": rxMessagesTopic1 == 3 rxMessagesTopic2 == 3 - await allFutures(nodes.mapIt(it.stop())) + await node1.stop() + await node2.stop() + await node3.stop() asyncTest "testing rln-relay with invalid proof": - let + var node1, node2, node3: WakuNode + let contentTopic = ContentTopic("/waku/2/default-content/proto") + lockNewGlobalBrokerContext: # publisher node - nodeKey1 = generateSecp256k1Key() + let nodeKey1 = generateSecp256k1Key() node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials() + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + info "Updated root for node1", rootUpdated1 + lockNewGlobalBrokerContext: # Relay node - nodeKey2 = 
generateSecp256k1Key() + let nodeKey2 = generateSecp256k1Key() node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + info "Updated root for node2", rootUpdated2 + lockNewGlobalBrokerContext: # Subscriber - nodeKey3 = generateSecp256k1Key() + let nodeKey3 = generateSecp256k1Key() node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" - contentTopic = ContentTopic("/waku/2/default-content/proto") + let wakuRlnConfig3 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - # set up three nodes - # node1 - (await node1.mountRelay()).isOkOr: - assert false, "Failed to mount relay" + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() - # mount rlnrelay in off-chain mode - let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) - - await node1.mountRlnRelay(wakuRlnConfig1) - await node1.start() - - let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) - let idCredentials1 = generateCredentials() - - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() - - let rootUpdated1 = waitFor manager1.updateRoots() - info "Updated root for node1", rootUpdated1 - - # node 2 - (await node2.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - # mount rlnrelay in off-chain mode - let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = 
MembershipIndex(2)) - - await node2.mountRlnRelay(wakuRlnConfig2) - await node2.start() - - let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) - let rootUpdated2 = waitFor manager2.updateRoots() - info "Updated root for node2", rootUpdated2 - - # node 3 - (await node3.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - - let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - - await node3.mountRlnRelay(wakuRlnConfig3) - await node3.start() - - let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) - let rootUpdated3 = waitFor manager3.updateRoots() - info "Updated root for node3", rootUpdated3 + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + info "Updated root for node3", rootUpdated3 # connect them together await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -390,72 +428,70 @@ procSuite "WakuNode - RLN relay": await node3.stop() asyncTest "testing rln-relay double-signaling detection": - let + var node1, node2, node3: WakuNode + let contentTopic = ContentTopic("/waku/2/default-content/proto") + lockNewGlobalBrokerContext: # publisher node - nodeKey1 = generateSecp256k1Key() + let nodeKey1 = generateSecp256k1Key() node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials() + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception 
raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + info "Updated root for node1", rootUpdated1 + lockNewGlobalBrokerContext: # Relay node - nodeKey2 = generateSecp256k1Key() + let nodeKey2 = generateSecp256k1Key() node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + # Registration is mandatory before sending messages with rln-relay + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + info "Updated root for node2", rootUpdated2 + lockNewGlobalBrokerContext: # Subscriber - nodeKey3 = generateSecp256k1Key() + let nodeKey3 = generateSecp256k1Key() node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" - contentTopic = ContentTopic("/waku/2/default-content/proto") + # mount rlnrelay in off-chain mode + let wakuRlnConfig3 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - # set up three nodes - # node1 - (await node1.mountRelay()).isOkOr: - assert false, "Failed to mount relay" + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() - # mount rlnrelay in off-chain mode - let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) - - await node1.mountRlnRelay(wakuRlnConfig1) - await node1.start() - - # Registration is mandatory before sending messages with rln-relay - let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) - let idCredentials1 = generateCredentials() - - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - 
assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() - - let rootUpdated1 = waitFor manager1.updateRoots() - info "Updated root for node1", rootUpdated1 - - # node 2 - (await node2.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - - # mount rlnrelay in off-chain mode - let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) - - await node2.mountRlnRelay(wakuRlnConfig2) - await node2.start() - - # Registration is mandatory before sending messages with rln-relay - let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) - let rootUpdated2 = waitFor manager2.updateRoots() - info "Updated root for node2", rootUpdated2 - - # node 3 - (await node3.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - - # mount rlnrelay in off-chain mode - let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) - - await node3.mountRlnRelay(wakuRlnConfig3) - await node3.start() - - # Registration is mandatory before sending messages with rln-relay - let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) - let rootUpdated3 = waitFor manager3.updateRoots() - info "Updated root for node3", rootUpdated3 + # Registration is mandatory before sending messages with rln-relay + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + info "Updated root for node3", rootUpdated3 # connect the nodes together node1 <-> node2 <-> node3 await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) @@ -565,49 +601,49 @@ procSuite "WakuNode - RLN relay": xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": ## This is skipped because is flaky and made CI randomly fail but is useful to run manually # Given two nodes + var node1, node2: WakuNode let contentTopic = ContentTopic("/waku/2/default-content/proto") shardSeq = @[DefaultRelayShard] - nodeKey1 = 
generateSecp256k1Key() - node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) - nodeKey2 = generateSecp256k1Key() - node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4 + lockNewGlobalBrokerContext: + let nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig1 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() - # Given both nodes mount relay and rlnrelay - (await node1.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) - await node1.mountRlnRelay(wakuRlnConfig1) - await node1.start() + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials() - # Registration is mandatory before sending messages with rln-relay - let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) - let idCredentials1 = generateCredentials() + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() - try: - waitFor manager1.register(idCredentials1, UserMessageLimit(20)) - except Exception, CatchableError: - assert false, - "exception raised when calling register: " & getCurrentExceptionMsg() + let rootUpdated1 = waitFor manager1.updateRoots() + info "Updated root for node1", rootUpdated1 + lockNewGlobalBrokerContext: + let nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount 
relay" + let wakuRlnConfig2 = + getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() - let rootUpdated1 = waitFor manager1.updateRoots() - info "Updated root for node1", rootUpdated1 - - # Mount rlnrelay in node2 in off-chain mode - (await node2.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) - await node2.mountRlnRelay(wakuRlnConfig2) - await node2.start() - - # Registration is mandatory before sending messages with rln-relay - let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) - let rootUpdated2 = waitFor manager2.updateRoots() - info "Updated root for node2", rootUpdated2 + # Registration is mandatory before sending messages with rln-relay + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + info "Updated root for node2", rootUpdated2 # Given the two nodes are started and connected - waitFor allFutures(node1.start(), node2.start()) await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) # Given some messages diff --git a/tests/waku_rln_relay/utils.nim b/tests/waku_rln_relay/utils.nim index a4247ab44..8aed18f9b 100644 --- a/tests/waku_rln_relay/utils.nim +++ b/tests/waku_rln_relay/utils.nim @@ -24,7 +24,6 @@ proc deployContract*( tr.`from` = Opt.some(web3.defaultAccount) let sData = code & contractInput tr.data = Opt.some(hexToSeqByte(sData)) - tr.gas = Opt.some(Quantity(3000000000000)) if gasPrice != 0: tr.gasPrice = Opt.some(gasPrice.Quantity) diff --git a/tests/waku_rln_relay/utils_onchain.nim b/tests/waku_rln_relay/utils_onchain.nim index 85f627aa0..9f1048097 100644 --- a/tests/waku_rln_relay/utils_onchain.nim +++ b/tests/waku_rln_relay/utils_onchain.nim @@ -3,7 +3,7 @@ {.push raises: [].} import - std/[options, os, osproc, deques, streams, strutils, tempfiles, 
strformat], + std/[options, os, osproc, streams, strutils, strformat], results, stew/byteutils, testutils/unittests, @@ -14,7 +14,6 @@ import web3/conversions, web3/eth_api_types, json_rpc/rpcclient, - json, libp2p/crypto/crypto, eth/keys, results @@ -24,25 +23,19 @@ import waku_rln_relay, waku_rln_relay/protocol_types, waku_rln_relay/constants, - waku_rln_relay/contract, waku_rln_relay/rln, ], - ../testlib/common, - ./utils + ../testlib/common const CHAIN_ID* = 1234'u256 -template skip0xPrefix(hexStr: string): int = - ## Returns the index of the first meaningful char in `hexStr` by skipping - ## "0x" prefix - if hexStr.len > 1 and hexStr[0] == '0' and hexStr[1] in {'x', 'X'}: 2 else: 0 - -func strip0xPrefix(s: string): string = - let prefixLen = skip0xPrefix(s) - if prefixLen != 0: - s[prefixLen .. ^1] - else: - s +# Path to the file which Anvil loads at startup to initialize the chain with pre-deployed contracts, an account funded with tokens and approved for spending +const DEFAULT_ANVIL_STATE_PATH* = + "tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz" +# The contract address of the TestStableToken used for the RLN Membership registration fee +const TOKEN_ADDRESS* = "0x5FbDB2315678afecb367f032d93F642f64180aa3" +# The contract address used ti interact with the WakuRLNV2 contract via the proxy +const WAKU_RLNV2_PROXY_ADDRESS* = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707" proc generateCredentials*(): IdentityCredential = let credRes = membershipKeyGen() @@ -82,6 +75,10 @@ proc getForgePath(): string = forgePath = joinPath(forgePath, ".foundry/bin/forge") return $forgePath +template execForge(cmd: string): tuple[output: string, exitCode: int] = + # unset env vars that affect e.g. 
"forge script" before running forge + execCmdEx("unset ETH_FROM ETH_PASSWORD && " & cmd) + contract(ERC20Token): proc allowance(owner: Address, spender: Address): UInt256 {.view.} proc balanceOf(account: Address): UInt256 {.view.} @@ -102,7 +99,7 @@ proc sendMintCall( recipientAddress: Address, amountTokens: UInt256, recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256), -): Future[TxHash] {.async.} = +): Future[void] {.async.} = let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome() if doBalanceAssert: @@ -138,7 +135,7 @@ proc sendMintCall( tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData)) trace "Sending mint call" - let txHash = await web3.send(tx) + discard await web3.send(tx) let balanceOfSelector = "0x70a08231" let balanceCallData = balanceOfSelector & paddedAddress @@ -153,8 +150,6 @@ proc sendMintCall( assert balanceAfterMint == balanceAfterExpectedTokens, fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}" - return txHash - # Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership) proc checkTokenAllowance( web3: Web3, tokenAddress: Address, owner: Address, spender: Address @@ -225,11 +220,14 @@ proc deployTestToken*( # Deploy TestToken contract let forgeCmdTestToken = fmt"""cd {submodulePath} && {forgePath} script test/TestToken.sol --broadcast -vvv --rpc-url http://localhost:8540 --tc TestTokenFactory --private-key {pk} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" - let (outputDeployTestToken, exitCodeDeployTestToken) = execCmdEx(forgeCmdTestToken) + let (outputDeployTestToken, exitCodeDeployTestToken) = execForge(forgeCmdTestToken) trace "Executed forge command to deploy TestToken contract", output = outputDeployTestToken if exitCodeDeployTestToken != 0: - return error("Forge command to deploy TestToken contract failed") + error "Forge command to deploy TestToken 
contract failed", + error = outputDeployTestToken + return + err("Forge command to deploy TestToken contract failed: " & outputDeployTestToken) # Parse the command output to find contract address let testTokenAddress = getContractAddressFromDeployScriptOutput(outputDeployTestToken).valueOr: @@ -351,7 +349,7 @@ proc executeForgeContractDeployScripts*( let forgeCmdPriceCalculator = fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployPriceCalculator --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" let (outputDeployPriceCalculator, exitCodeDeployPriceCalculator) = - execCmdEx(forgeCmdPriceCalculator) + execForge(forgeCmdPriceCalculator) trace "Executed forge command to deploy LinearPriceCalculator contract", output = outputDeployPriceCalculator if exitCodeDeployPriceCalculator != 0: @@ -368,7 +366,7 @@ proc executeForgeContractDeployScripts*( let forgeCmdWakuRln = fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployWakuRlnV2 --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" - let (outputDeployWakuRln, exitCodeDeployWakuRln) = execCmdEx(forgeCmdWakuRln) + let (outputDeployWakuRln, exitCodeDeployWakuRln) = execForge(forgeCmdWakuRln) trace "Executed forge command to deploy WakuRlnV2 contract", output = outputDeployWakuRln if exitCodeDeployWakuRln != 0: @@ -388,7 +386,7 @@ proc executeForgeContractDeployScripts*( # Deploy Proxy contract let forgeCmdProxy = fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployProxy --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" - let (outputDeployProxy, exitCodeDeployProxy) = execCmdEx(forgeCmdProxy) + let (outputDeployProxy, exitCodeDeployProxy) = execForge(forgeCmdProxy) 
trace "Executed forge command to deploy proxy contract", output = outputDeployProxy if exitCodeDeployProxy != 0: error "Forge command to deploy Proxy failed", error = outputDeployProxy @@ -480,31 +478,126 @@ proc getAnvilPath*(): string = anvilPath = joinPath(anvilPath, ".foundry/bin/anvil") return $anvilPath +proc decompressGzipFile*( + compressedPath: string, targetPath: string +): Result[void, string] = + ## Decompress a gzipped file using the gunzip command-line utility + let cmd = fmt"gunzip -c {compressedPath} > {targetPath}" + + try: + let (output, exitCode) = execCmdEx(cmd) + if exitCode != 0: + return err( + "Failed to decompress '" & compressedPath & "' to '" & targetPath & "': " & + output + ) + except OSError as e: + return err("Failed to execute gunzip command: " & e.msg) + except IOError as e: + return err("Failed to execute gunzip command: " & e.msg) + + ok() + +proc compressGzipFile*(sourcePath: string, targetPath: string): Result[void, string] = + ## Compress a file with gzip using the gzip command-line utility + let cmd = fmt"gzip -c {sourcePath} > {targetPath}" + + try: + let (output, exitCode) = execCmdEx(cmd) + if exitCode != 0: + return err( + "Failed to compress '" & sourcePath & "' to '" & targetPath & "': " & output + ) + except OSError as e: + return err("Failed to execute gzip command: " & e.msg) + except IOError as e: + return err("Failed to execute gzip command: " & e.msg) + + ok() + # Runs Anvil daemon -proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process = +proc runAnvil*( + port: int = 8540, + chainId: string = "1234", + stateFile: Option[string] = none(string), + dumpStateOnExit: bool = false, +): Process = # Passed options are # --port Port to listen on. # --gas-limit Sets the block gas limit in WEI. # --balance The default account balance, specified in ether. # --chain-id Chain ID of the network. 
+ # --load-state Initialize the chain from a previously saved state snapshot (read-only) + # --dump-state Dump the state on exit to the given file (write-only) + # Values used are representative of Linea Sepolia testnet # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details try: let anvilPath = getAnvilPath() info "Anvil path", anvilPath - let runAnvil = startProcess( - anvilPath, - args = [ + + var args = + @[ "--port", $port, "--gas-limit", - "300000000000000", + "30000000", + "--gas-price", + "7", + "--base-fee", + "7", "--balance", - "1000000000", + "10000000000", "--chain-id", $chainId, - ], - options = {poUsePath}, - ) + "--disable-min-priority-fee", + ] + + # Add state file argument if provided + if stateFile.isSome(): + var statePath = stateFile.get() + info "State file parameter provided", + statePath = statePath, + dumpStateOnExit = dumpStateOnExit, + absolutePath = absolutePath(statePath) + + # Check if the file is gzip compressed and handle decompression + if statePath.endsWith(".gz"): + let decompressedPath = statePath[0 .. 
^4] # Remove .gz extension + debug "Gzip compressed state file detected", + compressedPath = statePath, decompressedPath = decompressedPath + + if not fileExists(decompressedPath): + decompressGzipFile(statePath, decompressedPath).isOkOr: + error "Failed to decompress state file", error = error + return nil + + statePath = decompressedPath + + if dumpStateOnExit: + # Ensure the directory exists + let stateDir = parentDir(statePath) + if not dirExists(stateDir): + createDir(stateDir) + # Fresh deployment: start clean and dump state on exit + args.add("--dump-state") + args.add(statePath) + debug "Anvil configured to dump state on exit", path = statePath + else: + # Using cache: only load state, don't overwrite it (preserves clean cached state) + if fileExists(statePath): + args.add("--load-state") + args.add(statePath) + debug "Anvil configured to load state file (read-only)", path = statePath + else: + warn "State file does not exist, anvil will start fresh", + path = statePath, absolutePath = absolutePath(statePath) + else: + info "No state file provided, anvil will start fresh without state persistence" + + info "Starting anvil with arguments", args = args.join(" ") + + let runAnvil = + startProcess(anvilPath, args = args, options = {poUsePath, poStdErrToStdOut}) let anvilPID = runAnvil.processID # We read stdout from Anvil to see when daemon is ready @@ -516,7 +609,13 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process = anvilStartLog.add(cmdline) if cmdline.contains("Listening on 127.0.0.1:" & $port): break + else: + error "Anvil daemon exited (closed output)", + pid = anvilPID, startLog = anvilStartLog + return except Exception, CatchableError: + warn "Anvil daemon stdout reading error; assuming it started OK", + pid = anvilPID, startLog = anvilStartLog, err = getCurrentExceptionMsg() break info "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog return runAnvil @@ -536,7 +635,14 @@ proc stopAnvil*(runAnvil: 
Process) {.used.} = # Send termination signals when not defined(windows): discard execCmdEx(fmt"kill -TERM {anvilPID}") - discard execCmdEx(fmt"kill -9 {anvilPID}") + # Wait for graceful shutdown to allow state dumping + sleep(200) + # Only force kill if process is still running + let checkResult = execCmdEx(fmt"kill -0 {anvilPID} 2>/dev/null") + if checkResult.exitCode == 0: + info "Anvil process still running after TERM signal, sending KILL", + anvilPID = anvilPID + discard execCmdEx(fmt"kill -9 {anvilPID}") else: discard execCmdEx(fmt"taskkill /F /PID {anvilPID}") @@ -547,52 +653,100 @@ proc stopAnvil*(runAnvil: Process) {.used.} = info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg proc setupOnchainGroupManager*( - ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 + ethClientUrl: string = EthClient, + amountEth: UInt256 = 10.u256, + deployContracts: bool = true, ): Future[OnchainGroupManager] {.async.} = + ## Setup an onchain group manager for testing + ## If deployContracts is false, it will assume that the Anvil testnet already has the required contracts deployed, this significantly speeds up test runs. + ## To run Anvil with a cached state file containing pre-deployed contracts, see runAnvil documentation. + ## + ## To generate/update the cached state file: + ## 1. Call runAnvil with stateFile and dumpStateOnExit=true + ## 2. Run setupOnchainGroupManager with deployContracts=true to deploy contracts + ## 3. The state will be saved to the specified file when anvil exits + ## 4. Commit this file to git + ## + ## To use cached state: + ## 1. Call runAnvil with stateFile and dumpStateOnExit=false + ## 2. Anvil loads state in read-only mode (won't overwrite the cached file) + ## 3. Call setupOnchainGroupManager with deployContracts=false + ## 4. 
Tests run fast using pre-deployed contracts let rlnInstanceRes = createRlnInstance() check: rlnInstanceRes.isOk() let rlnInstance = rlnInstanceRes.get() - # connect to the eth client let web3 = await newWeb3(ethClientUrl) let accounts = await web3.provider.eth_accounts() web3.defaultAccount = accounts[1] - let (privateKey, acc) = createEthAccount(web3) + var privateKey: keys.PrivateKey + var acc: Address + var testTokenAddress: Address + var contractAddress: Address - # we just need to fund the default account - # the send procedure returns a tx hash that we don't use, hence discard - discard await sendEthTransfer( - web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256) - ) + if not deployContracts: + info "Using contract addresses from constants" - let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr: - assert false, "Failed to deploy test token contract: " & $error - return + testTokenAddress = Address(hexToByteArray[20](TOKEN_ADDRESS)) + contractAddress = Address(hexToByteArray[20](WAKU_RLNV2_PROXY_ADDRESS)) - # mint the token from the generated account - discard await sendMintCall( - web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256) - ) + (privateKey, acc) = createEthAccount(web3) - let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr: - assert false, "Failed to deploy RLN contract: " & $error - return + # Fund the test account + discard await sendEthTransfer(web3, web3.defaultAccount, acc, ethToWei(1000.u256)) - # If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens - let tokenApprovalResult = await approveTokenAllowanceAndVerify( - web3, - acc, - privateKey, - testTokenAddress, - contractAddress, - ethToWei(200.u256), - some(0.u256), - ) + # Mint tokens to the test account + await sendMintCall( + web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256) + ) - assert 
tokenApprovalResult.isOk, tokenApprovalResult.error() + # Approve the contract to spend tokens + let tokenApprovalResult = await approveTokenAllowanceAndVerify( + web3, acc, privateKey, testTokenAddress, contractAddress, ethToWei(200.u256) + ) + assert tokenApprovalResult.isOk(), tokenApprovalResult.error + else: + info "Performing Token and RLN contracts deployment" + (privateKey, acc) = createEthAccount(web3) + + # fund the default account + discard await sendEthTransfer( + web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256) + ) + + testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy test token contract: " & $error + return + + # mint the token from the generated account + await sendMintCall( + web3, + web3.defaultAccount, + testTokenAddress, + acc, + ethToWei(1000.u256), + some(0.u256), + ) + + contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy RLN contract: " & $error + return + + # If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens + let tokenApprovalResult = await approveTokenAllowanceAndVerify( + web3, + acc, + privateKey, + testTokenAddress, + contractAddress, + ethToWei(200.u256), + some(0.u256), + ) + + assert tokenApprovalResult.isOk(), tokenApprovalResult.error let manager = OnchainGroupManager( ethClientUrls: @[ethClientUrl], diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index b20309079..9239435af 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -374,6 +374,12 @@ procSuite "WakuNode - Store": waitFor allFutures(client.stop(), server.stop()) test "Store protocol queries overrun request rate limitation": + when defined(macosx): + # on macos CI, this test is resulting a code 200 (OK) instead of a 429 error + # means the runner is somehow too slow to 
cause a request limit failure + skip() + return + ## Setup let serverKey = generateSecp256k1Key() @@ -386,7 +392,7 @@ procSuite "WakuNode - Store": let mountArchiveRes = server.mountArchive(archiveA) assert mountArchiveRes.isOk(), mountArchiveRes.error - waitFor server.mountStore((3, 500.millis)) + waitFor server.mountStore((3, 200.millis)) client.mountStoreClient() @@ -413,11 +419,11 @@ procSuite "WakuNode - Store": for count in 0 ..< 3: waitFor successProc() - waitFor sleepAsync(20.millis) + waitFor sleepAsync(1.millis) waitFor failsProc() - waitFor sleepAsync(500.millis) + waitFor sleepAsync(200.millis) for count in 0 ..< 3: waitFor successProc() diff --git a/tests/wakunode2/test_app.nim b/tests/wakunode2/test_app.nim index b16880787..6ec6043fe 100644 --- a/tests/wakunode2/test_app.nim +++ b/tests/wakunode2/test_app.nim @@ -21,7 +21,7 @@ suite "Wakunode2 - Waku": raiseAssert error ## When - let version = waku.version + let version = waku.stateInfo.getNodeInfoItem(NodeInfoId.Version) ## Then check: @@ -60,7 +60,8 @@ suite "Wakunode2 - Waku initialization": not node.wakuRendezvous.isNil() ## Cleanup - waitFor waku.stop() + (waitFor waku.stop()).isOkOr: + raiseAssert error test "app properly handles dynamic port configuration": ## Given @@ -96,4 +97,5 @@ suite "Wakunode2 - Waku initialization": typedNodeEnr.get().tcp.get() != 0 ## Cleanup - waitFor waku.stop() + (waitFor waku.stop()).isOkOr: + raiseAssert error diff --git a/tests/wakunode_rest/test_rest_admin.nim b/tests/wakunode_rest/test_rest_admin.nim index 6de886f74..ef82b8dfc 100644 --- a/tests/wakunode_rest/test_rest_admin.nim +++ b/tests/wakunode_rest/test_rest_admin.nim @@ -65,7 +65,7 @@ suite "Waku v2 Rest API - Admin": ): Future[void] {.async, gcsafe.} = await sleepAsync(0.milliseconds) - let shard = RelayShard(clusterId: clusterId, shardId: 0) + let shard = RelayShard(clusterId: clusterId, shardId: 5) node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: assert false, "Failed to 
subscribe to topic: " & $error node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: @@ -212,6 +212,18 @@ suite "Waku v2 Rest API - Admin": let conn2 = await node1.peerManager.connectPeer(peerInfo2) let conn3 = await node1.peerManager.connectPeer(peerInfo3) + var count = 0 + while count < 20: + ## Wait ~1s at most for the peer store to update shard info + let getRes = await client.getPeers() + if getRes.data.allIt(it.shards == @[5.uint16]): + break + + count.inc() + await sleepAsync(50.milliseconds) + + assert count < 20, "Timeout waiting for shards to be updated in peer store" + # Check successful connections check: conn2 == true diff --git a/tests/wakunode_rest/test_rest_health.nim b/tests/wakunode_rest/test_rest_health.nim index dacfd801e..37abaf4f5 100644 --- a/tests/wakunode_rest/test_rest_health.nim +++ b/tests/wakunode_rest/test_rest_health.nim @@ -10,6 +10,7 @@ import libp2p/crypto/crypto import waku/[ + common/waku_protocol, waku_node, node/waku_node as waku_node2, # TODO: Remove after moving `git_version` to the app code. 
@@ -41,8 +42,8 @@ suite "Waku v2 REST API - health": var manager {.threadVar.}: OnchainGroupManager setup: - anvilProc = runAnvil() - manager = waitFor setupOnchainGroupManager() + anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH)) + manager = waitFor setupOnchainGroupManager(deployContracts = false) teardown: stopAnvil(anvilProc) @@ -50,33 +51,22 @@ suite "Waku v2 REST API - health": asyncTest "Get node health info - GET /health": # Given let node = testWakuNode() - let healthMonitor = NodeHealthMonitor() await node.start() (await node.mountRelay()).isOkOr: assert false, "Failed to mount relay" - healthMonitor.setOverallHealth(HealthStatus.INITIALIZING) - var restPort = Port(0) let restAddress = parseIpAddress("0.0.0.0") let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() restPort = restServer.httpServer.address.port # update with bound port for client use + let healthMonitor = NodeHealthMonitor.new(node) + installHealthApiHandler(restServer.router, healthMonitor) restServer.start() let client = newRestHttpClient(initTAddress(restAddress, restPort)) - # When - var response = await client.healthCheck() - - # Then - check: - response.status == 200 - $response.contentType == $MIMETYPE_JSON - response.data == - HealthReport(nodeHealth: HealthStatus.INITIALIZING, protocolsHealth: @[]) - - # now kick in rln (currently the only check for health) + # kick in rln (currently the only check for health) await node.mountRlnRelay( getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) ) @@ -84,51 +74,44 @@ suite "Waku v2 REST API - health": node.mountLightPushClient() await node.mountFilterClient() - healthMonitor.setNodeToHealthMonitor(node) + # We don't have a Waku, so we need to set the overall health to READY here in its behalf healthMonitor.setOverallHealth(HealthStatus.READY) + # When - response = await client.healthCheck() + var response = await client.healthCheck() + let report = response.data # Then check: response.status == 
200 $response.contentType == $MIMETYPE_JSON - response.data.nodeHealth == HealthStatus.READY - response.data.protocolsHealth.len() == 15 - response.data.protocolsHealth[0].protocol == "Relay" - response.data.protocolsHealth[0].health == HealthStatus.NOT_READY - response.data.protocolsHealth[0].desc == some("No connected peers") - response.data.protocolsHealth[1].protocol == "Rln Relay" - response.data.protocolsHealth[1].health == HealthStatus.READY - response.data.protocolsHealth[2].protocol == "Lightpush" - response.data.protocolsHealth[2].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[3].protocol == "Legacy Lightpush" - response.data.protocolsHealth[3].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[4].protocol == "Filter" - response.data.protocolsHealth[4].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[5].protocol == "Store" - response.data.protocolsHealth[5].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[6].protocol == "Legacy Store" - response.data.protocolsHealth[6].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[7].protocol == "Peer Exchange" - response.data.protocolsHealth[7].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[8].protocol == "Rendezvous" - response.data.protocolsHealth[8].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[9].protocol == "Mix" - response.data.protocolsHealth[9].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[10].protocol == "Lightpush Client" - response.data.protocolsHealth[10].health == HealthStatus.NOT_READY - response.data.protocolsHealth[10].desc == + report.nodeHealth == HealthStatus.READY + report.protocolsHealth.len() == 15 + + report.getHealth(RelayProtocol).health == HealthStatus.NOT_READY + report.getHealth(RelayProtocol).desc == some("No connected peers") + + report.getHealth(RlnRelayProtocol).health == HealthStatus.READY + + report.getHealth(LightpushProtocol).health 
== HealthStatus.NOT_MOUNTED + report.getHealth(LegacyLightpushProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(FilterProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(StoreProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(LegacyStoreProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(PeerExchangeProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(RendezvousProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(MixProtocol).health == HealthStatus.NOT_MOUNTED + + report.getHealth(LightpushClientProtocol).health == HealthStatus.NOT_READY + report.getHealth(LightpushClientProtocol).desc == some("No Lightpush service peer available yet") - response.data.protocolsHealth[11].protocol == "Legacy Lightpush Client" - response.data.protocolsHealth[11].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[12].protocol == "Store Client" - response.data.protocolsHealth[12].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[13].protocol == "Legacy Store Client" - response.data.protocolsHealth[13].health == HealthStatus.NOT_MOUNTED - response.data.protocolsHealth[14].protocol == "Filter Client" - response.data.protocolsHealth[14].health == HealthStatus.NOT_READY - response.data.protocolsHealth[14].desc == + + report.getHealth(LegacyLightpushClientProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(StoreClientProtocol).health == HealthStatus.NOT_MOUNTED + report.getHealth(LegacyStoreClientProtocol).health == HealthStatus.NOT_MOUNTED + + report.getHealth(FilterClientProtocol).health == HealthStatus.NOT_READY + report.getHealth(FilterClientProtocol).desc == some("No Filter service peer available yet") await restServer.stop() diff --git a/tests/wakunode_rest/test_rest_lightpush.nim b/tests/wakunode_rest/test_rest_lightpush.nim index cc5c715b8..deba7de22 100644 --- a/tests/wakunode_rest/test_rest_lightpush.nim +++ b/tests/wakunode_rest/test_rest_lightpush.nim @@ -61,7 
+61,7 @@ proc init( assert false, "Failed to mount relay: " & $error (await testSetup.serviceNode.mountRelay()).isOkOr: assert false, "Failed to mount relay: " & $error - await testSetup.serviceNode.mountLightPush(rateLimit) + check (await testSetup.serviceNode.mountLightPush(rateLimit)).isOk() testSetup.pushNode.mountLightPushClient() testSetup.serviceNode.peerManager.addServicePeer( diff --git a/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/tests/wakunode_rest/test_rest_lightpush_legacy.nim index 526a6c24e..4043eeed9 100644 --- a/tests/wakunode_rest/test_rest_lightpush_legacy.nim +++ b/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -61,7 +61,7 @@ proc init( assert false, "Failed to mount relay" (await testSetup.serviceNode.mountRelay()).isOkOr: assert false, "Failed to mount relay" - await testSetup.serviceNode.mountLegacyLightPush(rateLimit) + check (await testSetup.serviceNode.mountLegacyLightPush(rateLimit)).isOk() testSetup.pushNode.mountLegacyLightPushClient() testSetup.serviceNode.peerManager.addServicePeer( diff --git a/tests/wakunode_rest/test_rest_relay.nim b/tests/wakunode_rest/test_rest_relay.nim index ca9f7cb17..f16e5c4f4 100644 --- a/tests/wakunode_rest/test_rest_relay.nim +++ b/tests/wakunode_rest/test_rest_relay.nim @@ -21,6 +21,7 @@ import rest_api/endpoint/relay/client as relay_rest_client, waku_relay, waku_rln_relay, + common/broker/broker_context, ], ../testlib/wakucore, ../testlib/wakunode, @@ -505,15 +506,41 @@ suite "Waku v2 Rest API - Relay": asyncTest "Post a message to a content topic - POST /relay/v1/auto/messages/{topic}": ## "Relay API: publish and subscribe/unsubscribe": # Given - let node = testWakuNode() - (await node.mountRelay()).isOkOr: - assert false, "Failed to mount relay" - require node.mountAutoSharding(1, 8).isOk + var meshNode: WakuNode + lockNewGlobalBrokerContext: + meshNode = testWakuNode() + (await meshNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require 
meshNode.mountAutoSharding(1, 8).isOk - let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + let wakuRlnConfig = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await meshNode.mountRlnRelay(wakuRlnConfig) + await meshNode.start() + const testPubsubTopic = PubsubTopic("/waku/2/rs/1/0") + proc dummyHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + discard + + meshNode.subscribe((kind: ContentSub, topic: DefaultContentTopic), dummyHandler).isOkOr: + raiseAssert "Failed to subscribe meshNode: " & error + + var node: WakuNode + lockNewGlobalBrokerContext: + node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + let wakuRlnConfig = + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + await node.connectToNodes(@[meshNode.peerInfo.toRemotePeerInfo()]) - await node.mountRlnRelay(wakuRlnConfig) - await node.start() # Registration is mandatory before sending messages with rln-relay let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) let idCredentials = generateCredentials() diff --git a/tools/confutils/cli_args.nim b/tools/confutils/cli_args.nim index e6b3fc97d..4a6e8c618 100644 --- a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -30,7 +30,8 @@ import waku_core/message/default_values, waku_mix, ], - ../../tools/rln_keystore_generator/rln_keystore_generator + ../../tools/rln_keystore_generator/rln_keystore_generator, + ./entry_nodes import ./envvar as confEnvvarDefs, ./envvar_net as confEnvvarNet @@ -52,6 +53,11 @@ type StartUpCommand* = enum noCommand # default, runs waku generateRlnKeystore # generates a new RLN keystore +type WakuMode* {.pure.} = enum + noMode # default - use explicit CLI flags as-is + Core # full service node + Edge # client-only node + type WakuNodeConf* = 
object configFile* {. desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)", @@ -150,9 +156,16 @@ type WakuNodeConf* = object .}: seq[ProtectedShard] ## General node config + mode* {. + desc: + "Node operation mode. 'Core' enables relay+service protocols. 'Edge' enables client-only protocols. Default: explicit CLI flags used.", + defaultValue: WakuMode.noMode, + name: "mode" + .}: WakuMode + preset* {. desc: - "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.", + "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). 'logos.dev' is the Logos Dev Network (cluster 2). Overrides other values.", defaultValue: "", name: "preset" .}: string @@ -165,7 +178,7 @@ type WakuNodeConf* = object .}: uint16 agentString* {. - defaultValue: "nwaku-" & cli_args.git_version, + defaultValue: "logos-delivery-" & cli_args.git_version, desc: "Node agent string which is used as identifier in network", name: "agent-string" .}: string @@ -206,22 +219,17 @@ type WakuNodeConf* = object .}: bool maxConnections* {. - desc: "Maximum allowed number of libp2p connections.", - defaultValue: 50, + desc: + "Maximum allowed number of libp2p connections. (Default: 150) that's recommended value for better connectivity", + defaultValue: 150, name: "max-connections" .}: int - maxRelayPeers* {. - desc: - "Deprecated. Use relay-service-ratio instead. It represents the maximum allowed number of relay peers.", - name: "max-relay-peers" - .}: Option[int] - relayServiceRatio* {. desc: "This percentage ratio represents the relay peers to service peers. 
For example, 60:40, tells that 60% of the max-connections will be used for relay protocol and the other 40% of max-connections will be reserved for other service protocols (e.g., filter, lightpush, store, metadata, etc.)", - name: "relay-service-ratio", - defaultValue: "60:40" # 60:40 ratio of relay to service peers + defaultValue: "50:50", + name: "relay-service-ratio" .}: string colocationLimit* {. @@ -298,6 +306,14 @@ hence would have reachability issues.""", name: "rln-relay-dynamic" .}: bool + entryNodes* {. + desc: + "Entry node address (enrtree:, enr:, or multiaddr). " & + "Automatically classified and distributed to DNS discovery, discv5 bootstrap, " & + "and static nodes. Argument may be repeated.", + name: "entry-node" + .}: seq[string] + staticnodes* {. desc: "Peer multiaddr to directly connect with. Argument may be repeated.", name: "staticnode" @@ -458,13 +474,15 @@ hence would have reachability issues.""", desc: """Adds an extra effort in the delivery/reception of messages by leveraging store-v3 requests. with the drawback of consuming some more bandwidth.""", - defaultValue: false, + defaultValue: true, name: "reliability" .}: bool ## REST HTTP config rest* {. - desc: "Enable Waku REST HTTP server: true|false", defaultValue: true, name: "rest" + desc: "Enable Waku REST HTTP server: true|false", + defaultValue: false, + name: "rest" .}: bool restAddress* {. @@ -626,6 +644,20 @@ with the drawback of consuming some more bandwidth.""", name: "mixnode" .}: seq[MixNodePubInfo] + # Kademlia Discovery config + enableKadDiscovery* {. + desc: + "Enable extended kademlia discovery. Can be enabled without bootstrap nodes for the first node in the network.", + defaultValue: false, + name: "enable-kad-discovery" + .}: bool + + kadBootstrapNodes* {. + desc: + "Peer multiaddr for kademlia discovery bootstrap node (must include /p2p/). Argument may be repeated.", + name: "kad-bootstrap-node" + .}: seq[string] + ## websocket config websocketSupport* {. 
desc: "Enable websocket: true|false", @@ -898,12 +930,19 @@ proc toNetworkConf( "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead." ) lcPreset = "twn" + if clusterId.isSome() and clusterId.get() == 2: + warn( + "Logos.dev - Logos.dev configuration will not be applied when `--cluster-id=2` is passed in future releases. Use `--preset=logos.dev` instead." + ) + lcPreset = "logos.dev" case lcPreset of "": ok(none(NetworkConf)) of "twn": ok(some(NetworkConf.TheWakuNetworkConf())) + of "logos.dev", "logosdev": + ok(some(NetworkConf.LogosDevConf())) else: err("Invalid --preset value passed: " & lcPreset) @@ -957,9 +996,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.withExtMultiAddrsOnly(n.extMultiAddrsOnly) b.withMaxConnections(n.maxConnections) - if n.maxRelayPeers.isSome(): - b.withMaxRelayPeers(n.maxRelayPeers.get()) - if n.relayServiceRatio != "": b.withRelayServiceRatio(n.relayServiceRatio) b.withColocationLimit(n.colocationLimit) @@ -976,6 +1012,26 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.withRelayShardedPeerManagement(n.relayShardedPeerManagement) b.withStaticNodes(n.staticNodes) + # Process entry nodes - supports enrtree:, enr:, and multiaddress formats + if n.entryNodes.len > 0: + let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processEntryNodes( + n.entryNodes + ).valueOr: + return err("Failed to process entry nodes: " & error) + + # Set ENRTree URLs for DNS discovery + if enrTreeUrls.len > 0: + for url in enrTreeUrls: + b.dnsDiscoveryConf.withEnrTreeUrl(url) + + # Set ENR records as bootstrap nodes for discv5 + if bootstrapEnrs.len > 0: + b.discv5Conf.withBootstrapNodes(bootstrapEnrs) + + # Add static nodes (multiaddrs and those extracted from ENR entries) + if staticNodesFromEntry.len > 0: + b.withStaticNodes(staticNodesFromEntry) + if n.numShardsInNetwork != 0: b.withNumShardsInCluster(n.numShardsInNetwork) 
b.withShardingConf(AutoSharding) @@ -1063,6 +1119,31 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.webSocketConf.withKeyPath(n.websocketSecureKeyPath) b.webSocketConf.withCertPath(n.websocketSecureCertPath) - b.rateLimitConf.withRateLimits(n.rateLimits) + if n.rateLimits.len > 0: + b.rateLimitConf.withRateLimits(n.rateLimits) + + b.kademliaDiscoveryConf.withEnabled(n.enableKadDiscovery) + b.kademliaDiscoveryConf.withBootstrapNodes(n.kadBootstrapNodes) + + # Mode-driven configuration overrides + case n.mode + of WakuMode.Core: + b.withRelay(true) + b.filterServiceConf.withEnabled(true) + b.withLightPush(true) + b.discv5Conf.withEnabled(true) + b.withPeerExchange(true) + b.withRendezvous(true) + b.rateLimitConf.withRateLimitsIfNotAssigned( + @["filter:100/1s", "lightpush:5/1s", "px:5/1s"] + ) + of WakuMode.Edge: + b.withPeerExchange(true) + b.withRelay(false) + b.filterServiceConf.withEnabled(false) + b.withLightPush(false) + b.storeServiceConf.withEnabled(false) + of WakuMode.noMode: + discard # use explicit CLI flags as-is return b.build() diff --git a/tools/confutils/config_option_meta.nim b/tools/confutils/config_option_meta.nim new file mode 100644 index 000000000..1880fdef5 --- /dev/null +++ b/tools/confutils/config_option_meta.nim @@ -0,0 +1,143 @@ +import std/[macros] + +type ConfigOptionMeta* = object + fieldName*: string + typeName*: string + cliName*: string + desc*: string + defaultValue*: string + command*: string + +proc getPragmaValue(pragmaNode: NimNode, pragmaName: string): string {.compileTime.} = + if pragmaNode.kind != nnkPragma: + return "" + + for item in pragmaNode: + if item.kind == nnkExprColonExpr and item[0].eqIdent(pragmaName): + return item[1].repr + + return "" + +proc getFieldName(fieldNode: NimNode): string {.compileTime.} = + case fieldNode.kind + of nnkPragmaExpr: + if fieldNode.len >= 1: + return getFieldName(fieldNode[0]) + of nnkPostfix: + if fieldNode.len >= 2: + return getFieldName(fieldNode[1]) + of nnkIdent, 
nnkSym: + return fieldNode.strVal + else: + discard + + return fieldNode.repr + +proc getFieldAndPragma( + fieldDef: NimNode +): tuple[fieldName, typeName: string, pragmaNode: NimNode] {.compileTime.} = + if fieldDef.kind != nnkIdentDefs: + return ("", "", newNimNode(nnkEmpty)) + + let declaredField = fieldDef[0] + var typeNode = fieldDef[1] + var pragmaNode = newNimNode(nnkEmpty) + + if declaredField.kind == nnkPragmaExpr: + pragmaNode = declaredField[1] + elif typeNode.kind == nnkPragmaExpr: + pragmaNode = typeNode[1] + typeNode = typeNode[0] + + return (getFieldName(declaredField), typeNode.repr, pragmaNode) + +proc makeMetaNode( + fieldName, typeName, cliName, desc, defaultValue, command: string +): NimNode {.compileTime.} = + result = newTree( + nnkObjConstr, + ident("ConfigOptionMeta"), + newTree(nnkExprColonExpr, ident("fieldName"), newLit(fieldName)), + newTree(nnkExprColonExpr, ident("typeName"), newLit(typeName)), + newTree(nnkExprColonExpr, ident("cliName"), newLit(cliName)), + newTree(nnkExprColonExpr, ident("desc"), newLit(desc)), + newTree(nnkExprColonExpr, ident("defaultValue"), newLit(defaultValue)), + newTree(nnkExprColonExpr, ident("command"), newLit(command)), + ) + +macro extractConfigOptionMeta*(T: typedesc): untyped = + proc findFirstRecList(n: NimNode): NimNode {.compileTime.} = + if n.kind == nnkRecList: + return n + for child in n: + let found = findFirstRecList(child) + if not found.isNil: + return found + return nil + + proc collectRecList( + recList: NimNode, metas: var seq[NimNode], commandCtx: string + ) {.compileTime.} = + for child in recList: + case child.kind + of nnkIdentDefs: + let (fieldName, typeName, pragmaNode) = getFieldAndPragma(child) + if fieldName.len == 0: + continue + let cliName = block: + let n = getPragmaValue(pragmaNode, "name") + if n.len > 0: n else: fieldName + let desc = getPragmaValue(pragmaNode, "desc") + let defaultValue = getPragmaValue(pragmaNode, "defaultValue") + metas.add( + makeMetaNode(fieldName, 
typeName, cliName, desc, defaultValue, commandCtx) + ) + of nnkRecCase: + let discriminator = child[0] + if discriminator.kind == nnkIdentDefs: + let (fieldName, typeName, pragmaNode) = getFieldAndPragma(discriminator) + if fieldName.len > 0: + let cliName = block: + let n = getPragmaValue(pragmaNode, "name") + if n.len > 0: n else: fieldName + let desc = getPragmaValue(pragmaNode, "desc") + let defaultValue = getPragmaValue(pragmaNode, "defaultValue") + metas.add( + makeMetaNode(fieldName, typeName, cliName, desc, defaultValue, commandCtx) + ) + + for i in 1 ..< child.len: + let branch = child[i] + case branch.kind + of nnkOfBranch: + let branchCtx = branch[0].repr + for j in 1 ..< branch.len: + if branch[j].kind == nnkRecList: + collectRecList(branch[j], metas, branchCtx) + of nnkElse: + for j in 0 ..< branch.len: + if branch[j].kind == nnkRecList: + collectRecList(branch[j], metas, commandCtx) + else: + discard + else: + discard + + let typeInst = getTypeInst(T) + var targetType = T + if typeInst.kind == nnkBracketExpr and typeInst.len >= 2: + targetType = typeInst[1] + + let typeImpl = getImpl(targetType) + let recList = findFirstRecList(typeImpl) + if recList.isNil: + return newTree(nnkPrefix, ident("@"), newNimNode(nnkBracket)) + + var metas: seq[NimNode] = @[] + collectRecList(recList, metas, "") + + let bracket = newNimNode(nnkBracket) + for node in metas: + bracket.add(node) + + result = newTree(nnkPrefix, ident("@"), bracket) diff --git a/waku/api/entry_nodes.nim b/tools/confutils/entry_nodes.nim similarity index 100% rename from waku/api/entry_nodes.nim rename to tools/confutils/entry_nodes.nim diff --git a/vendor/nim-chronos b/vendor/nim-chronos index 0646c444f..85af4db76 160000 --- a/vendor/nim-chronos +++ b/vendor/nim-chronos @@ -1 +1 @@ -Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655 +Subproject commit 85af4db764ecd3573c4704139560df3943216cf1 diff --git a/vendor/nim-dnsdisc b/vendor/nim-dnsdisc index b71d029f4..203abd2b3 160000 --- 
a/vendor/nim-dnsdisc +++ b/vendor/nim-dnsdisc @@ -1 +1 @@ -Subproject commit b71d029f4da4ec56974d54c04518bada00e1b623 +Subproject commit 203abd2b3e758e0ea3ae325769b20a7e1bcd1010 diff --git a/vendor/nim-faststreams b/vendor/nim-faststreams index c3ac3f639..ce27581a3 160000 --- a/vendor/nim-faststreams +++ b/vendor/nim-faststreams @@ -1 +1 @@ -Subproject commit c3ac3f639ed1d62f59d3077d376a29c63ac9750c +Subproject commit ce27581a3e881f782f482cb66dc5b07a02bd615e diff --git a/vendor/nim-ffi b/vendor/nim-ffi new file mode 160000 index 000000000..06111de15 --- /dev/null +++ b/vendor/nim-ffi @@ -0,0 +1 @@ +Subproject commit 06111de155253b34e47ed2aaed1d61d08d62cc1b diff --git a/vendor/nim-http-utils b/vendor/nim-http-utils index 79cbab146..c53852d9e 160000 --- a/vendor/nim-http-utils +++ b/vendor/nim-http-utils @@ -1 +1 @@ -Subproject commit 79cbab1460f4c0cdde2084589d017c43a3d7b4f1 +Subproject commit c53852d9e24205b6363bba517fa8ee7bde823691 diff --git a/vendor/nim-json-serialization b/vendor/nim-json-serialization index b65fd6a7e..c343b0e24 160000 --- a/vendor/nim-json-serialization +++ b/vendor/nim-json-serialization @@ -1 +1 @@ -Subproject commit b65fd6a7e64c864dabe40e7dfd6c7d07db0014ac +Subproject commit c343b0e243d9e17e2c40f3a8a24340f7c4a71d44 diff --git a/vendor/nim-jwt b/vendor/nim-jwt new file mode 160000 index 000000000..18f8378de --- /dev/null +++ b/vendor/nim-jwt @@ -0,0 +1 @@ +Subproject commit 18f8378de52b241f321c1f9ea905456e89b95c6f diff --git a/vendor/nim-libp2p b/vendor/nim-libp2p index 0309685cd..ff8d51857 160000 --- a/vendor/nim-libp2p +++ b/vendor/nim-libp2p @@ -1 +1 @@ -Subproject commit 0309685cd27d4bf763c8b3be86a76c33bcfe67ea +Subproject commit ff8d51857b4b79a68468e7bcc27b2026cca02996 diff --git a/vendor/nim-lsquic b/vendor/nim-lsquic new file mode 160000 index 000000000..4fb03ee7b --- /dev/null +++ b/vendor/nim-lsquic @@ -0,0 +1 @@ +Subproject commit 4fb03ee7bfb39aecb3316889fdcb60bec3d0936f diff --git a/vendor/nim-metrics b/vendor/nim-metrics index 
ecf64c607..a1296caf3 160000 --- a/vendor/nim-metrics +++ b/vendor/nim-metrics @@ -1 +1 @@ -Subproject commit ecf64c6078d1276d3b7d9b3d931fbdb70004db11 +Subproject commit a1296caf3ebb5f30f51a5feae7749a30df2824c2 diff --git a/vendor/nim-presto b/vendor/nim-presto index 92b1c7ff1..d66043dd7 160000 --- a/vendor/nim-presto +++ b/vendor/nim-presto @@ -1 +1 @@ -Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be +Subproject commit d66043dd7ede146442e6c39720c76a20bde5225f diff --git a/vendor/nim-serialization b/vendor/nim-serialization index 6f525d544..b0f2fa329 160000 --- a/vendor/nim-serialization +++ b/vendor/nim-serialization @@ -1 +1 @@ -Subproject commit 6f525d5447d97256750ca7856faead03e562ed20 +Subproject commit b0f2fa32960ea532a184394b0f27be37bd80248b diff --git a/vendor/nim-sqlite3-abi b/vendor/nim-sqlite3-abi index bdf01cf42..89ba51f55 160000 --- a/vendor/nim-sqlite3-abi +++ b/vendor/nim-sqlite3-abi @@ -1 +1 @@ -Subproject commit bdf01cf4236fb40788f0733466cdf6708783cbac +Subproject commit 89ba51f557414d3a3e17ab3df8270e1bdaa3ca2a diff --git a/vendor/nim-stew b/vendor/nim-stew index e57400149..b66168735 160000 --- a/vendor/nim-stew +++ b/vendor/nim-stew @@ -1 +1 @@ -Subproject commit e5740014961438610d336cd81706582dbf2c96f0 +Subproject commit b66168735d6f3841c5239c3169d3fe5fe98b1257 diff --git a/vendor/nim-testutils b/vendor/nim-testutils index 94d68e796..e4d37dc16 160000 --- a/vendor/nim-testutils +++ b/vendor/nim-testutils @@ -1 +1 @@ -Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857 +Subproject commit e4d37dc1652d5c63afb89907efb5a5e812261797 diff --git a/vendor/nim-toml-serialization b/vendor/nim-toml-serialization index fea85b27f..b5b387e6f 160000 --- a/vendor/nim-toml-serialization +++ b/vendor/nim-toml-serialization @@ -1 +1 @@ -Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a +Subproject commit b5b387e6fb2a7cc75d54a269b07cc6218361bd46 diff --git a/vendor/nim-unittest2 b/vendor/nim-unittest2 index 8b51e99b4..26f2ef3ae 160000 --- 
a/vendor/nim-unittest2 +++ b/vendor/nim-unittest2 @@ -1 +1 @@ -Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024 +Subproject commit 26f2ef3ae0ec72a2a75bfe557e02e88f6a31c189 diff --git a/vendor/nim-websock b/vendor/nim-websock index ebe308a79..35ae76f15 160000 --- a/vendor/nim-websock +++ b/vendor/nim-websock @@ -1 +1 @@ -Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508 +Subproject commit 35ae76f1559e835c80f9c1a3943bf995d3dd9eb5 diff --git a/vendor/waku-rlnv2-contract b/vendor/waku-rlnv2-contract index 900d4f95e..d9906ef40 160000 --- a/vendor/waku-rlnv2-contract +++ b/vendor/waku-rlnv2-contract @@ -1 +1 @@ -Subproject commit 900d4f95e0e618bdeb4c241f7a4b6347df6bb950 +Subproject commit d9906ef40f1e113fcf51de4ad27c61aa45375c2d diff --git a/waku.nim b/waku.nim index 18d52741e..65a017c5a 100644 --- a/waku.nim +++ b/waku.nim @@ -1,10 +1,10 @@ ## Main module for using nwaku as a Nimble library -## +## ## This module re-exports the public API for creating and managing Waku nodes ## when using nwaku as a library dependency. 
-import waku/api/[api, api_conf] -export api, api_conf +import waku/api +export api import waku/factory/waku export waku diff --git a/waku.nimble b/waku.nimble index c63d20246..d879bc0e1 100644 --- a/waku.nimble +++ b/waku.nimble @@ -24,13 +24,16 @@ requires "nim >= 2.2.4", "stew", "stint", "metrics", - "libp2p >= 1.14.2", + "libp2p >= 1.15.0", "web3", "presto", "regex", "results", "db_connector", - "minilru" + "minilru", + "lsquic", + "jwt", + "ffi" ### Helper functions proc buildModule(filePath, params = "", lang = "c"): bool = @@ -61,27 +64,21 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " & srcDir & name & ".nim" -proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") = +proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static", srcFile = "libwaku.nim", mainPrefix = "libwaku") = if not dirExists "build": mkDir "build" # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" var extra_params = params - for i in 2 ..< paramCount(): + for i in 2 ..< (paramCount() - 1): extra_params &= " " & paramStr(i) if `type` == "static": - exec "nim c" & " --out:build/" & name & - ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " & - extra_params & " " & srcDir & name & ".nim" + exec "nim c" & " --out:build/" & lib_name & + " --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " & + extra_params & " " & srcDir & srcFile else: - let lib_name = (when defined(windows): toDll(name) else: name & ".so") - when defined(windows): - exec "nim c" & " --out:build/" & lib_name & - " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off 
-d:discv5_protocol_id=d5waku " & - extra_params & " " & srcDir & name & ".nim" - else: - exec "nim c" & " --out:build/" & lib_name & - " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " & - extra_params & " " & srcDir & name & ".nim" + exec "nim c" & " --out:build/" & lib_name & + " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " & + extra_params & " " & srcDir & srcFile proc buildMobileAndroid(srcDir = ".", params = "") = let cpu = getEnv("CPU") @@ -96,7 +93,7 @@ proc buildMobileAndroid(srcDir = ".", params = "") = extra_params &= " " & paramStr(i) exec "nim c" & " --out:" & outDir & - "/libwaku.so --threads:on --app:lib --opt:size --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header --passL:-L" & + "/libwaku.so --threads:on --app:lib --opt:size --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header -d:chronosEventEngine=epoll --passL:-L" & outdir & " --passL:-lrln --passL:-llog --cpu:" & cpu & " --os:android -d:androidNDK " & extra_params & " " & srcDir & "/libwaku.nim" @@ -139,7 +136,7 @@ task testwakunode2, "Build & run wakunode2 app tests": test "all_tests_wakunode2" task example2, "Build Waku examples": - buildBinary "waku_example", "examples/" + buildBinary "api_example", "examples/api_example/" buildBinary "publisher", "examples/" buildBinary "subscriber", "examples/" buildBinary "filter_subscriber", "examples/" @@ -153,7 +150,8 @@ task chat2, "Build example Waku chat usage": let name = "chat2" buildBinary name, "apps/chat2/", - "-d:chronicles_sinks=textlines[file] -d:ssl -d:chronicles_log_level='TRACE' " + "-d:chronicles_sinks=textlines[file] -d:chronicles_log_level='TRACE' " + # -d:ssl - cause unlisted exception error in libp2p/utility... 
task chat2mix, "Build example Waku chat mix usage": # NOTE For debugging, set debug level. For chat usage we want minimal log @@ -163,7 +161,8 @@ task chat2mix, "Build example Waku chat mix usage": let name = "chat2mix" buildBinary name, "apps/chat2mix/", - "-d:chronicles_sinks=textlines[file] -d:ssl -d:chronicles_log_level='TRACE' " + "-d:chronicles_sinks=textlines[file] -d:chronicles_log_level='TRACE' " + # -d:ssl - cause unlisted exception error in libp2p/utility... task chat2bridge, "Build chat2bridge": let name = "chat2bridge" @@ -177,6 +176,10 @@ task lightpushwithmix, "Build lightpushwithmix": let name = "lightpush_publisher_mix" buildBinary name, "examples/lightpush_mix/" +task api_example, "Build api_example": + let name = "api_example" + buildBinary name, "examples/api_example/" + task buildone, "Build custom target": let filepath = paramStr(paramCount()) discard buildModule filepath @@ -206,15 +209,202 @@ let chroniclesParams = "--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE" task libwakuStatic, "Build the cbindings waku node library": - let name = "libwaku" - buildLibrary name, "library/", chroniclesParams, "static" + let lib_name = paramStr(paramCount()) + buildLibrary lib_name, "library/", chroniclesParams, "static" task libwakuDynamic, "Build the cbindings waku node library": - let name = "libwaku" - buildLibrary name, "library/", chroniclesParams, "dynamic" + let lib_name = paramStr(paramCount()) + buildLibrary lib_name, "library/", chroniclesParams, "dynamic" ### Mobile Android task libWakuAndroid, "Build the mobile bindings for Android": let srcDir = "./library" let extraParams = "-d:chronicles_log_level=ERROR" buildMobileAndroid srcDir, extraParams + +### Mobile iOS +import std/sequtils + +proc buildMobileIOS(srcDir = ".", params = "") = + echo "Building iOS libwaku library" + + let iosArch = getEnv("IOS_ARCH") + let iosSdk = getEnv("IOS_SDK") + let sdkPath = getEnv("IOS_SDK_PATH") + + if sdkPath.len == 0: + quit "Error: 
IOS_SDK_PATH not set. Set it to the path of the iOS SDK" + + # Use SDK name in path to differentiate device vs simulator + let outDir = "build/ios/" & iosSdk & "-" & iosArch + if not dirExists outDir: + mkDir outDir + + var extra_params = params + for i in 2 ..< paramCount(): + extra_params &= " " & paramStr(i) + + let cpu = if iosArch == "arm64": "arm64" else: "amd64" + + # The output static library + let nimcacheDir = outDir & "/nimcache" + let objDir = outDir & "/obj" + let vendorObjDir = outDir & "/vendor_obj" + let aFile = outDir & "/libwaku.a" + + if not dirExists objDir: + mkDir objDir + if not dirExists vendorObjDir: + mkDir vendorObjDir + + let clangBase = "clang -arch " & iosArch & " -isysroot " & sdkPath & + " -mios-version-min=18.0 -fembed-bitcode -fPIC -O2" + + # Generate C sources from Nim (no linking) + exec "nim c" & + " --nimcache:" & nimcacheDir & + " --os:ios --cpu:" & cpu & + " --compileOnly:on" & + " --noMain --mm:refc" & + " --threads:on --opt:size --header" & + " -d:metrics -d:discv5_protocol_id=d5waku" & + " --nimMainPrefix:libwaku --skipParentCfg:on" & + " --cc:clang" & + " " & extra_params & + " " & srcDir & "/libwaku.nim" + + # Compile vendor C libraries for iOS + + # --- BearSSL --- + echo "Compiling BearSSL for iOS..." + let bearSslSrcDir = "./vendor/nim-bearssl/bearssl/csources/src" + let bearSslIncDir = "./vendor/nim-bearssl/bearssl/csources/inc" + for path in walkDirRec(bearSslSrcDir): + if path.endsWith(".c"): + let relPath = path.replace(bearSslSrcDir & "/", "").replace("/", "_") + let baseName = relPath.changeFileExt("o") + let oFile = vendorObjDir / ("bearssl_" & baseName) + if not fileExists(oFile): + exec clangBase & " -I" & bearSslIncDir & " -I" & bearSslSrcDir & " -c " & path & " -o " & oFile + + # --- secp256k1 --- + echo "Compiling secp256k1 for iOS..." 
+ let secp256k1Dir = "./vendor/nim-secp256k1/vendor/secp256k1" + let secp256k1Flags = " -I" & secp256k1Dir & "/include" & + " -I" & secp256k1Dir & "/src" & + " -I" & secp256k1Dir & + " -DENABLE_MODULE_RECOVERY=1" & + " -DENABLE_MODULE_ECDH=1" & + " -DECMULT_WINDOW_SIZE=15" & + " -DECMULT_GEN_PREC_BITS=4" + + # Main secp256k1 source + let secp256k1Obj = vendorObjDir / "secp256k1.o" + if not fileExists(secp256k1Obj): + exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/secp256k1.c -o " & secp256k1Obj + + # Precomputed tables (required for ecmult operations) + let secp256k1PreEcmultObj = vendorObjDir / "secp256k1_precomputed_ecmult.o" + if not fileExists(secp256k1PreEcmultObj): + exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/precomputed_ecmult.c -o " & secp256k1PreEcmultObj + + let secp256k1PreEcmultGenObj = vendorObjDir / "secp256k1_precomputed_ecmult_gen.o" + if not fileExists(secp256k1PreEcmultGenObj): + exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/precomputed_ecmult_gen.c -o " & secp256k1PreEcmultGenObj + + # --- miniupnpc --- + echo "Compiling miniupnpc for iOS..." 
+ let miniupnpcSrcDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/src" + let miniupnpcIncDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/include" + let miniupnpcBuildDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build" + let miniupnpcFiles = @[ + "addr_is_reserved.c", "connecthostport.c", "igd_desc_parse.c", + "minisoap.c", "minissdpc.c", "miniupnpc.c", "miniwget.c", + "minixml.c", "portlistingparse.c", "receivedata.c", "upnpcommands.c", + "upnpdev.c", "upnperrors.c", "upnpreplyparse.c" + ] + for fileName in miniupnpcFiles: + let srcPath = miniupnpcSrcDir / fileName + let oFile = vendorObjDir / ("miniupnpc_" & fileName.changeFileExt("o")) + if fileExists(srcPath) and not fileExists(oFile): + exec clangBase & + " -I" & miniupnpcIncDir & + " -I" & miniupnpcSrcDir & + " -I" & miniupnpcBuildDir & + " -DMINIUPNPC_SET_SOCKET_TIMEOUT" & + " -D_BSD_SOURCE -D_DEFAULT_SOURCE" & + " -c " & srcPath & " -o " & oFile + + # --- libnatpmp --- + echo "Compiling libnatpmp for iOS..." + let natpmpSrcDir = "./vendor/nim-nat-traversal/vendor/libnatpmp-upstream" + # Only compile natpmp.c - getgateway.c uses net/route.h which is not available on iOS + let natpmpObj = vendorObjDir / "natpmp_natpmp.o" + if not fileExists(natpmpObj): + exec clangBase & + " -I" & natpmpSrcDir & + " -DENABLE_STRNATPMPERR" & + " -c " & natpmpSrcDir & "/natpmp.c -o " & natpmpObj + + # Use iOS-specific stub for getgateway + let getgatewayStubSrc = "./library/ios_natpmp_stubs.c" + let getgatewayStubObj = vendorObjDir / "natpmp_getgateway_stub.o" + if fileExists(getgatewayStubSrc) and not fileExists(getgatewayStubObj): + exec clangBase & " -c " & getgatewayStubSrc & " -o " & getgatewayStubObj + + # --- BearSSL stubs (for tools functions not in main library) --- + echo "Compiling BearSSL stubs for iOS..." 
+ let bearSslStubsSrc = "./library/ios_bearssl_stubs.c" + let bearSslStubsObj = vendorObjDir / "bearssl_stubs.o" + if fileExists(bearSslStubsSrc) and not fileExists(bearSslStubsObj): + exec clangBase & " -c " & bearSslStubsSrc & " -o " & bearSslStubsObj + + # Compile all Nim-generated C files to object files + echo "Compiling Nim-generated C files for iOS..." + var cFiles: seq[string] = @[] + for kind, path in walkDir(nimcacheDir): + if kind == pcFile and path.endsWith(".c"): + cFiles.add(path) + + for cFile in cFiles: + let baseName = extractFilename(cFile).changeFileExt("o") + let oFile = objDir / baseName + exec clangBase & + " -DENABLE_STRNATPMPERR" & + " -I./vendor/nimbus-build-system/vendor/Nim/lib/" & + " -I./vendor/nim-bearssl/bearssl/csources/inc/" & + " -I./vendor/nim-bearssl/bearssl/csources/tools/" & + " -I./vendor/nim-bearssl/bearssl/abi/" & + " -I./vendor/nim-secp256k1/vendor/secp256k1/include/" & + " -I./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/include/" & + " -I./vendor/nim-nat-traversal/vendor/libnatpmp-upstream/" & + " -I" & nimcacheDir & + " -c " & cFile & + " -o " & oFile + + # Create static library from all object files + echo "Creating static library..." 
+ var objFiles: seq[string] = @[] + for kind, path in walkDir(objDir): + if kind == pcFile and path.endsWith(".o"): + objFiles.add(path) + for kind, path in walkDir(vendorObjDir): + if kind == pcFile and path.endsWith(".o"): + objFiles.add(path) + + exec "libtool -static -o " & aFile & " " & objFiles.join(" ") + + echo "✔ iOS library created: " & aFile + +task libWakuIOS, "Build the mobile bindings for iOS": + let srcDir = "./library" + let extraParams = "-d:chronicles_log_level=ERROR" + buildMobileIOS srcDir, extraParams + +task liblogosdeliveryStatic, "Build the liblogosdelivery (Logos Messaging Delivery API) static library": + let lib_name = paramStr(paramCount()) + buildLibrary lib_name, "liblogosdelivery/", chroniclesParams, "static", "liblogosdelivery.nim", "liblogosdelivery" + +task liblogosdeliveryDynamic, "Build the liblogosdelivery (Logos Messaging Delivery API) dynamic library": + let lib_name = paramStr(paramCount()) + buildLibrary lib_name, "liblogosdelivery/", chroniclesParams, "dynamic", "liblogosdelivery.nim", "liblogosdelivery" diff --git a/waku/api.nim b/waku/api.nim index c3211867d..a977a062a 100644 --- a/waku/api.nim +++ b/waku/api.nim @@ -1,3 +1,5 @@ -import ./api/[api, api_conf, entry_nodes] +import ./api/[api, api_conf] +import ./events/message_events +import tools/confutils/entry_nodes -export api, api_conf, entry_nodes +export api, api_conf, entry_nodes, message_events diff --git a/waku/api/api.nim b/waku/api/api.nim index 5bab06188..1eee982fd 100644 --- a/waku/api/api.nim +++ b/waku/api/api.nim @@ -1,12 +1,20 @@ import chronicles, chronos, results import waku/factory/waku +import waku/[requests/health_requests, waku_core, waku_node] +import waku/node/delivery_service/send_service +import waku/node/delivery_service/subscription_manager +import libp2p/peerid +import ../../tools/confutils/cli_args +import ./[api_conf, types] -import ./api_conf +export cli_args -# TODO: Specs says it should return a `WakuNode`. 
As `send` and other APIs are defined, we can align. -proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} = - let wakuConf = toWakuConf(config).valueOr: +logScope: + topics = "api" + +proc createNode*(conf: WakuNodeConf): Future[Result[Waku, string]] {.async.} = + let wakuConf = conf.toWakuConf().valueOr: return err("Failed to handle the configuration: " & error) ## We are not defining app callbacks at node creation @@ -15,3 +23,54 @@ proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} = return err("Failed setting up Waku: " & $error) return ok(wakuRes) + +proc checkApiAvailability(w: Waku): Result[void, string] = + if w.isNil(): + return err("Waku node is not initialized") + + # TODO: Conciliate request-bouncing health checks here with unit testing. + # (For now, better to just allow all sends and rely on retries.) + + return ok() + +proc subscribe*( + w: Waku, contentTopic: ContentTopic +): Future[Result[void, string]] {.async.} = + ?checkApiAvailability(w) + + return w.deliveryService.subscriptionManager.subscribe(contentTopic) + +proc unsubscribe*(w: Waku, contentTopic: ContentTopic): Result[void, string] = + ?checkApiAvailability(w) + + return w.deliveryService.subscriptionManager.unsubscribe(contentTopic) + +proc send*( + w: Waku, envelope: MessageEnvelope +): Future[Result[RequestId, string]] {.async.} = + ?checkApiAvailability(w) + + let isSubbed = w.deliveryService.subscriptionManager + .isSubscribed(envelope.contentTopic) + .valueOr(false) + if not isSubbed: + info "Auto-subscribing to topic on send", contentTopic = envelope.contentTopic + w.deliveryService.subscriptionManager.subscribe(envelope.contentTopic).isOkOr: + warn "Failed to auto-subscribe", error = error + return err("Failed to auto-subscribe before sending: " & error) + + let requestId = RequestId.new(w.rng) + + let deliveryTask = DeliveryTask.new(requestId, envelope, w.brokerCtx).valueOr: + return err("API send: Failed to create delivery task: 
" & error) + + info "API send: scheduling delivery task", + requestId = $requestId, + pubsubTopic = deliveryTask.pubsubTopic, + contentTopic = deliveryTask.msg.contentTopic, + msgHash = deliveryTask.msgHash.to0xHex(), + myPeerId = w.node.peerId() + + asyncSpawn w.deliveryService.sendService.send(deliveryTask) + + return ok(requestId) diff --git a/waku/api/api_conf.nim b/waku/api/api_conf.nim index 155554dfd..70bb02af3 100644 --- a/waku/api/api_conf.nim +++ b/waku/api/api_conf.nim @@ -1,13 +1,17 @@ import std/[net, options] import results +import json_serialization, json_serialization/std/options as json_options import waku/common/utils/parse_size_units, + waku/common/logging, waku/factory/waku_conf, waku/factory/conf_builder/conf_builder, waku/factory/networks_config, - ./entry_nodes + tools/confutils/entry_nodes + +export json_serialization, json_options type AutoShardingConfig* {.requiresInit.} = object numShardsInCluster*: uint16 @@ -81,11 +85,16 @@ type WakuMode* {.pure.} = enum Edge Core -type NodeConfig* {.requiresInit.} = object +type NodeConfig* {. 
+ requiresInit, deprecated: "Use WakuNodeConf from tools/confutils/cli_args instead" +.} = object mode: WakuMode protocolsConfig: ProtocolsConfig networkingConfig: NetworkingConfig ethRpcEndpoints: seq[string] + p2pReliability: bool + logLevel: LogLevel + logFormat: LogFormat proc init*( T: typedesc[NodeConfig], @@ -93,17 +102,69 @@ proc init*( protocolsConfig: ProtocolsConfig = TheWakuNetworkPreset, networkingConfig: NetworkingConfig = DefaultNetworkingConfig, ethRpcEndpoints: seq[string] = @[], + p2pReliability: bool = false, + logLevel: LogLevel = LogLevel.INFO, + logFormat: LogFormat = LogFormat.TEXT, ): T = return T( mode: mode, protocolsConfig: protocolsConfig, networkingConfig: networkingConfig, ethRpcEndpoints: ethRpcEndpoints, + p2pReliability: p2pReliability, + logLevel: logLevel, + logFormat: logFormat, ) -proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = +# -- Getters for ProtocolsConfig (private fields) - used for testing -- + +proc entryNodes*(c: ProtocolsConfig): seq[string] = + c.entryNodes + +proc staticStoreNodes*(c: ProtocolsConfig): seq[string] = + c.staticStoreNodes + +proc clusterId*(c: ProtocolsConfig): uint16 = + c.clusterId + +proc autoShardingConfig*(c: ProtocolsConfig): AutoShardingConfig = + c.autoShardingConfig + +proc messageValidation*(c: ProtocolsConfig): MessageValidation = + c.messageValidation + +# -- Getters for NodeConfig (private fields) - used for testing -- + +proc mode*(c: NodeConfig): WakuMode = + c.mode + +proc protocolsConfig*(c: NodeConfig): ProtocolsConfig = + c.protocolsConfig + +proc networkingConfig*(c: NodeConfig): NetworkingConfig = + c.networkingConfig + +proc ethRpcEndpoints*(c: NodeConfig): seq[string] = + c.ethRpcEndpoints + +proc p2pReliability*(c: NodeConfig): bool = + c.p2pReliability + +proc logLevel*(c: NodeConfig): LogLevel = + c.logLevel + +proc logFormat*(c: NodeConfig): LogFormat = + c.logFormat + +proc toWakuConf*( + nodeConfig: NodeConfig +): Result[WakuConf, string] {.deprecated: 
"Use WakuNodeConf.toWakuConf instead".} = var b = WakuConfBuilder.init() + # Apply log configuration + b.withLogLevel(nodeConfig.logLevel) + b.withLogFormat(nodeConfig.logFormat) + # Apply networking configuration let networkingConfig = nodeConfig.networkingConfig let ip = parseIpAddress(networkingConfig.listenIpv4) @@ -131,7 +192,16 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = b.rateLimitConf.withRateLimits(@["filter:100/1s", "lightpush:5/1s", "px:5/1s"]) of Edge: - return err("Edge mode is not implemented") + # All client side protocols are mounted by default + # Peer exchange client is always enabled and start_node will start the px loop + # Metadata is always mounted + b.withPeerExchange(true) + # switch off all service side protocols and relay + b.withRelay(false) + b.filterServiceConf.withEnabled(false) + b.withLightPush(false) + b.storeServiceConf.withEnabled(false) + # Leave discv5 and rendezvous for user choice ## Network Conf let protocolsConfig = nodeConfig.protocolsConfig @@ -193,6 +263,7 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = ## Various configurations b.withNatStrategy("any") + b.withP2PReliability(nodeConfig.p2pReliability) let wakuConf = b.build().valueOr: return err("Failed to build configuration: " & error) @@ -201,3 +272,263 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = return err("Failed to validate configuration: " & error) return ok(wakuConf) + +# ---- JSON serialization (writeValue / readValue) ---- +# ---------- AutoShardingConfig ---------- + +proc writeValue*(w: var JsonWriter, val: AutoShardingConfig) {.raises: [IOError].} = + w.beginRecord() + w.writeField("numShardsInCluster", val.numShardsInCluster) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var AutoShardingConfig +) {.raises: [SerializationError, IOError].} = + var numShardsInCluster: Option[uint16] + + for fieldName in readObjectFields(r): + case fieldName + of 
"numShardsInCluster": + numShardsInCluster = some(r.readValue(uint16)) + else: + r.raiseUnexpectedField(fieldName, "AutoShardingConfig") + + if numShardsInCluster.isNone(): + r.raiseUnexpectedValue("Missing required field 'numShardsInCluster'") + + val = AutoShardingConfig(numShardsInCluster: numShardsInCluster.get()) + +# ---------- RlnConfig ---------- + +proc writeValue*(w: var JsonWriter, val: RlnConfig) {.raises: [IOError].} = + w.beginRecord() + w.writeField("contractAddress", val.contractAddress) + w.writeField("chainId", val.chainId) + w.writeField("epochSizeSec", val.epochSizeSec) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var RlnConfig +) {.raises: [SerializationError, IOError].} = + var + contractAddress: Option[string] + chainId: Option[uint] + epochSizeSec: Option[uint64] + + for fieldName in readObjectFields(r): + case fieldName + of "contractAddress": + contractAddress = some(r.readValue(string)) + of "chainId": + chainId = some(r.readValue(uint)) + of "epochSizeSec": + epochSizeSec = some(r.readValue(uint64)) + else: + r.raiseUnexpectedField(fieldName, "RlnConfig") + + if contractAddress.isNone(): + r.raiseUnexpectedValue("Missing required field 'contractAddress'") + if chainId.isNone(): + r.raiseUnexpectedValue("Missing required field 'chainId'") + if epochSizeSec.isNone(): + r.raiseUnexpectedValue("Missing required field 'epochSizeSec'") + + val = RlnConfig( + contractAddress: contractAddress.get(), + chainId: chainId.get(), + epochSizeSec: epochSizeSec.get(), + ) + +# ---------- NetworkingConfig ---------- + +proc writeValue*(w: var JsonWriter, val: NetworkingConfig) {.raises: [IOError].} = + w.beginRecord() + w.writeField("listenIpv4", val.listenIpv4) + w.writeField("p2pTcpPort", val.p2pTcpPort) + w.writeField("discv5UdpPort", val.discv5UdpPort) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var NetworkingConfig +) {.raises: [SerializationError, IOError].} = + var + listenIpv4: Option[string] + p2pTcpPort: 
Option[uint16] + discv5UdpPort: Option[uint16] + + for fieldName in readObjectFields(r): + case fieldName + of "listenIpv4": + listenIpv4 = some(r.readValue(string)) + of "p2pTcpPort": + p2pTcpPort = some(r.readValue(uint16)) + of "discv5UdpPort": + discv5UdpPort = some(r.readValue(uint16)) + else: + r.raiseUnexpectedField(fieldName, "NetworkingConfig") + + if listenIpv4.isNone(): + r.raiseUnexpectedValue("Missing required field 'listenIpv4'") + if p2pTcpPort.isNone(): + r.raiseUnexpectedValue("Missing required field 'p2pTcpPort'") + if discv5UdpPort.isNone(): + r.raiseUnexpectedValue("Missing required field 'discv5UdpPort'") + + val = NetworkingConfig( + listenIpv4: listenIpv4.get(), + p2pTcpPort: p2pTcpPort.get(), + discv5UdpPort: discv5UdpPort.get(), + ) + +# ---------- MessageValidation ---------- + +proc writeValue*(w: var JsonWriter, val: MessageValidation) {.raises: [IOError].} = + w.beginRecord() + w.writeField("maxMessageSize", val.maxMessageSize) + w.writeField("rlnConfig", val.rlnConfig) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var MessageValidation +) {.raises: [SerializationError, IOError].} = + var + maxMessageSize: Option[string] + rlnConfig: Option[Option[RlnConfig]] + + for fieldName in readObjectFields(r): + case fieldName + of "maxMessageSize": + maxMessageSize = some(r.readValue(string)) + of "rlnConfig": + rlnConfig = some(r.readValue(Option[RlnConfig])) + else: + r.raiseUnexpectedField(fieldName, "MessageValidation") + + if maxMessageSize.isNone(): + r.raiseUnexpectedValue("Missing required field 'maxMessageSize'") + + val = MessageValidation( + maxMessageSize: maxMessageSize.get(), rlnConfig: rlnConfig.get(none(RlnConfig)) + ) + +# ---------- ProtocolsConfig ---------- + +proc writeValue*(w: var JsonWriter, val: ProtocolsConfig) {.raises: [IOError].} = + w.beginRecord() + w.writeField("entryNodes", val.entryNodes) + w.writeField("staticStoreNodes", val.staticStoreNodes) + w.writeField("clusterId", val.clusterId) + 
w.writeField("autoShardingConfig", val.autoShardingConfig) + w.writeField("messageValidation", val.messageValidation) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var ProtocolsConfig +) {.raises: [SerializationError, IOError].} = + var + entryNodes: Option[seq[string]] + staticStoreNodes: Option[seq[string]] + clusterId: Option[uint16] + autoShardingConfig: Option[AutoShardingConfig] + messageValidation: Option[MessageValidation] + + for fieldName in readObjectFields(r): + case fieldName + of "entryNodes": + entryNodes = some(r.readValue(seq[string])) + of "staticStoreNodes": + staticStoreNodes = some(r.readValue(seq[string])) + of "clusterId": + clusterId = some(r.readValue(uint16)) + of "autoShardingConfig": + autoShardingConfig = some(r.readValue(AutoShardingConfig)) + of "messageValidation": + messageValidation = some(r.readValue(MessageValidation)) + else: + r.raiseUnexpectedField(fieldName, "ProtocolsConfig") + + if entryNodes.isNone(): + r.raiseUnexpectedValue("Missing required field 'entryNodes'") + if clusterId.isNone(): + r.raiseUnexpectedValue("Missing required field 'clusterId'") + + val = ProtocolsConfig.init( + entryNodes = entryNodes.get(), + staticStoreNodes = staticStoreNodes.get(@[]), + clusterId = clusterId.get(), + autoShardingConfig = autoShardingConfig.get(DefaultAutoShardingConfig), + messageValidation = messageValidation.get(DefaultMessageValidation), + ) + +# ---------- NodeConfig ---------- + +proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} = + w.beginRecord() + w.writeField("mode", val.mode) + w.writeField("protocolsConfig", val.protocolsConfig) + w.writeField("networkingConfig", val.networkingConfig) + w.writeField("ethRpcEndpoints", val.ethRpcEndpoints) + w.writeField("p2pReliability", val.p2pReliability) + w.writeField("logLevel", val.logLevel) + w.writeField("logFormat", val.logFormat) + w.endRecord() + +proc readValue*( + r: var JsonReader, val: var NodeConfig +) {.raises: 
[SerializationError, IOError].} = + var + mode: Option[WakuMode] + protocolsConfig: Option[ProtocolsConfig] + networkingConfig: Option[NetworkingConfig] + ethRpcEndpoints: Option[seq[string]] + p2pReliability: Option[bool] + logLevel: Option[LogLevel] + logFormat: Option[LogFormat] + + for fieldName in readObjectFields(r): + case fieldName + of "mode": + mode = some(r.readValue(WakuMode)) + of "protocolsConfig": + protocolsConfig = some(r.readValue(ProtocolsConfig)) + of "networkingConfig": + networkingConfig = some(r.readValue(NetworkingConfig)) + of "ethRpcEndpoints": + ethRpcEndpoints = some(r.readValue(seq[string])) + of "p2pReliability": + p2pReliability = some(r.readValue(bool)) + of "logLevel": + logLevel = some(r.readValue(LogLevel)) + of "logFormat": + logFormat = some(r.readValue(LogFormat)) + else: + r.raiseUnexpectedField(fieldName, "NodeConfig") + + val = NodeConfig.init( + mode = mode.get(WakuMode.Core), + protocolsConfig = protocolsConfig.get(TheWakuNetworkPreset), + networkingConfig = networkingConfig.get(DefaultNetworkingConfig), + ethRpcEndpoints = ethRpcEndpoints.get(@[]), + p2pReliability = p2pReliability.get(false), + logLevel = logLevel.get(LogLevel.INFO), + logFormat = logFormat.get(LogFormat.TEXT), + ) + +# ---------- Decode helper ---------- +# Json.decode returns T via `result`, which conflicts with {.requiresInit.} +# on Nim 2.x. This helper avoids the issue by using readValue into a var. + +proc decodeNodeConfigFromJson*( + jsonStr: string +): NodeConfig {. 
+ raises: [SerializationError], + deprecated: "Use WakuNodeConf with fieldPairs-based JSON parsing instead" +.} = + var val = NodeConfig.init() # default-initialized + try: + var stream = unsafeMemoryInput(jsonStr) + var reader = JsonReader[DefaultFlavor].init(stream) + reader.readValue(val) + except IOError as err: + raise (ref SerializationError)(msg: err.msg) + return val diff --git a/waku/api/send_api.md b/waku/api/send_api.md new file mode 100644 index 000000000..2a5a2f8a4 --- /dev/null +++ b/waku/api/send_api.md @@ -0,0 +1,46 @@ +# SEND API + +**THIS IS TO BE REMOVED BEFORE PR MERGE** + +This document collects logic and todo's around the Send API. + +## Overview + +Send api hides the complex logic of using raw protocols for reliable message delivery. +The delivery method is chosen based on the node configuration and actual availabilities of peers. + +## Delivery task + +Each message send request is bundled into a task that not just holds the composed message but also the state of the delivery. + +## Delivery methods + +Depending on the configuration and the availability of store client protocol + actual configured and/or discovered store nodes: +- P2PReliability validation - checking network store node whether the message is reached at least a store node. +- Simple retry until message is propagated to the network + - Relay says >0 peers as publish result + - LightpushClient returns with success + +Depending on node config: +- Relay +- Lightpush + +These methods are used in combination to achieve the best reliability. +Fallback mechanism is used to switch between methods if the current one fails. + +Relay+StoreCheck -> Relay+simple retry -> Lightpush+StoreCheck -> Lightpush simple retry -> Error + +Combination is dynamically chosen on node configuration. Levels can be skipped depending on actual connectivity. 
+Actual connectivity is checked: +- Relay's topic health check - at least dLow peers in the mesh for the topic +- Store nodes availability - at least one store service node is available in peer manager +- Lightpush client availability - at least one lightpush service node is available in peer manager + +## Delivery processing + +At every send request, each task is tried to be delivered right away. +Any further retries and store check is done as a background task in a loop with predefined intervals. +Each task is set for a maximum number of retries and/or maximum time to live. + +In each round of store check and retry send tasks are selected based on their state. +The state is updated based on the result of the delivery method. diff --git a/waku/api/types.nim b/waku/api/types.nim new file mode 100644 index 000000000..9eae503c8 --- /dev/null +++ b/waku/api/types.nim @@ -0,0 +1,65 @@ +{.push raises: [].} + +import bearssl/rand, std/times, chronos +import stew/byteutils +import waku/utils/requests as request_utils +import waku/waku_core/[topics/content_topic, message/message, time] +import waku/requests/requests + +type + MessageEnvelope* = object + contentTopic*: ContentTopic + payload*: seq[byte] + ephemeral*: bool + + RequestId* = distinct string + + ConnectionStatus* {.pure.} = enum + Disconnected + PartiallyConnected + Connected + +proc new*(T: typedesc[RequestId], rng: ref HmacDrbgContext): T = + ## Generate a new RequestId using the provided RNG. 
+ RequestId(request_utils.generateRequestId(rng)) + +proc `$`*(r: RequestId): string {.inline.} = + string(r) + +proc `==`*(a, b: RequestId): bool {.inline.} = + string(a) == string(b) + +proc init*( + T: type MessageEnvelope, + contentTopic: ContentTopic, + payload: seq[byte] | string, + ephemeral: bool = false, +): MessageEnvelope = + when payload is seq[byte]: + MessageEnvelope(contentTopic: contentTopic, payload: payload, ephemeral: ephemeral) + else: + MessageEnvelope( + contentTopic: contentTopic, payload: payload.toBytes(), ephemeral: ephemeral + ) + +proc toWakuMessage*(envelope: MessageEnvelope): WakuMessage = + ## Convert a MessageEnvelope to a WakuMessage. + var wm = WakuMessage( + contentTopic: envelope.contentTopic, + payload: envelope.payload, + ephemeral: envelope.ephemeral, + timestamp: getNowInNanosecondTime(), + ) + + ## TODO: First find out if proof is needed at all + ## Follow up: left it to the send logic to add RLN proof if needed and possible + # let requestedProof = ( + # waitFor RequestGenerateRlnProof.request(wm, getTime().toUnixFloat()) + # ).valueOr: + # warn "Failed to add RLN proof to WakuMessage: ", error = error + # return wm + + # wm.proof = requestedProof.proof + return wm + +{.pop.} diff --git a/waku/common/broker/broker_context.nim b/waku/common/broker/broker_context.nim new file mode 100644 index 000000000..483a2e3a7 --- /dev/null +++ b/waku/common/broker/broker_context.nim @@ -0,0 +1,68 @@ +{.push raises: [].} + +import std/[strutils, concurrency/atomics], chronos + +type BrokerContext* = distinct uint32 + +func `==`*(a, b: BrokerContext): bool = + uint32(a) == uint32(b) + +func `!=`*(a, b: BrokerContext): bool = + uint32(a) != uint32(b) + +func `$`*(bc: BrokerContext): string = + toHex(uint32(bc), 8) + +const DefaultBrokerContext* = BrokerContext(0xCAFFE14E'u32) + +# Global broker context accessor. +# +# NOTE: This intentionally creates a *single* active BrokerContext per process +# (per event loop thread). 
Use only if you accept serialization of all broker +# context usage through the lock. +var globalBrokerContextLock {.threadvar.}: AsyncLock +globalBrokerContextLock = newAsyncLock() +var globalBrokerContextValue {.threadvar.}: BrokerContext +globalBrokerContextValue = DefaultBrokerContext +proc globalBrokerContext*(): BrokerContext = + ## Returns the currently active global broker context. + ## + ## This is intentionally lock-free; callers should use it inside + ## `withNewGlobalBrokerContext` / `withGlobalBrokerContext`. + globalBrokerContextValue + +var gContextCounter: Atomic[uint32] + +proc NewBrokerContext*(): BrokerContext = + var nextId = gContextCounter.fetchAdd(1, moRelaxed) + if nextId == uint32(DefaultBrokerContext): + nextId = gContextCounter.fetchAdd(1, moRelaxed) + return BrokerContext(nextId) + +template lockGlobalBrokerContext*(brokerCtx: BrokerContext, body: untyped): untyped = + ## Runs `body` while holding the global broker context lock with the provided + ## `brokerCtx` installed as the globally accessible context. + ## + ## This template is intended for use from within `chronos` async procs. + block: + await noCancel(globalBrokerContextLock.acquire()) + let previousBrokerCtx = globalBrokerContextValue + globalBrokerContextValue = brokerCtx + try: + body + finally: + globalBrokerContextValue = previousBrokerCtx + try: + globalBrokerContextLock.release() + except AsyncLockError: + doAssert false, "globalBrokerContextLock.release(): lock not held" + +template lockNewGlobalBrokerContext*(body: untyped): untyped = + ## Runs `body` while holding the global broker context lock with a freshly + ## generated broker context installed as the global accessor. + ## + ## The previous global broker context (if any) is restored on exit. 
+ lockGlobalBrokerContext(NewBrokerContext()): + body + +{.pop.} diff --git a/waku/common/broker/event_broker.nim b/waku/common/broker/event_broker.nim new file mode 100644 index 000000000..779689f88 --- /dev/null +++ b/waku/common/broker/event_broker.nim @@ -0,0 +1,412 @@ +## EventBroker +## ------------------- +## EventBroker represents a reactive decoupling pattern, that +## allows event-driven development without +## need for direct dependencies in between emitters and listeners. +## Worth considering using it in a single or many emitters to many listeners scenario. +## +## Generates a standalone, type-safe event broker for the declared type. +## The macro exports the value type itself plus a broker companion that manages +## listeners via thread-local storage. +## +## Type definitions: +## - Inline `object` / `ref object` definitions are supported. +## - Native types, aliases, and externally-defined types are also supported. +## In that case, EventBroker will automatically wrap the declared RHS type in +## `distinct` unless you already used `distinct`. +## This keeps event types unique even when multiple brokers share the same +## underlying base type. +## +## Default vs. context aware use: +## Every generated broker is a thread-local global instance. This means EventBroker +## enables decoupled event exchange threadwise. +## +## Sometimes we use brokers inside a context (e.g. within a component that has many +## modules or subsystems). If you instantiate multiple such components in a single +## thread, and each component must have its own listener set for the same EventBroker +## type, you can use context-aware EventBroker. +## +## Context awareness is supported through the `BrokerContext` argument for +## `listen`, `emit`, `dropListener`, and `dropAllListeners`. +## Listener stores are kept separate per broker context. +## +## Default broker context is defined as `DefaultBrokerContext`. 
If you don't need +## context awareness, you can keep using the interfaces without the context +## argument, which operate on `DefaultBrokerContext`. +## +## Usage: +## Declare your desired event type inside an `EventBroker` macro, add any number of fields.: +## ```nim +## EventBroker: +## type TypeName = object +## field1*: FieldType +## field2*: AnotherFieldType +## ``` +## +## After this, you can register async listeners anywhere in your code with +## `TypeName.listen(...)`, which returns a handle to the registered listener. +## Listeners are async procs or lambdas that take a single argument of the event type. +## Any number of listeners can be registered in different modules. +## +## Events can be emitted from anywhere with no direct dependency on the listeners by +## calling `TypeName.emit(...)` with an instance of the event type. +## This will asynchronously notify all registered listeners with the emitted event. +## +## Whenever you no longer need a listener (or your object instance that listen to the event goes out of scope), +## you can remove it from the broker with the handle returned by `listen`. +## This is done by calling `TypeName.dropListener(handle)`. +## Alternatively, you can remove all registered listeners through `TypeName.dropAllListeners()`. 
+## +## +## Example: +## ```nim +## EventBroker: +## type GreetingEvent = object +## text*: string +## +## let handle = GreetingEvent.listen( +## proc(evt: GreetingEvent): Future[void] {.async.} = +## echo evt.text +## ) +## GreetingEvent.emit(text= "hi") +## GreetingEvent.dropListener(handle) +## ``` + +## Example (non-object event type): +## ```nim +## EventBroker: +## type CounterEvent = int # exported as: `distinct int` +## +## discard CounterEvent.listen( +## proc(evt: CounterEvent): Future[void] {.async.} = +## echo int(evt) +## ) +## CounterEvent.emit(CounterEvent(42)) +## ``` + +import std/[macros, tables] +import chronos, chronicles, results +import ./helper/broker_utils, broker_context + +export chronicles, results, chronos, broker_context + +macro EventBroker*(body: untyped): untyped = + when defined(eventBrokerDebug): + echo body.treeRepr + let parsed = parseSingleTypeDef(body, "EventBroker", collectFieldInfo = true) + let typeIdent = parsed.typeIdent + let objectDef = parsed.objectDef + let fieldNames = parsed.fieldNames + let fieldTypes = parsed.fieldTypes + let hasInlineFields = parsed.hasInlineFields + + let exportedTypeIdent = postfix(copyNimTree(typeIdent), "*") + let sanitized = sanitizeIdentName(typeIdent) + let typeNameLit = newLit($typeIdent) + let handlerProcIdent = ident(sanitized & "ListenerProc") + let listenerHandleIdent = ident(sanitized & "Listener") + let brokerTypeIdent = ident(sanitized & "Broker") + let exportedHandlerProcIdent = postfix(copyNimTree(handlerProcIdent), "*") + let exportedListenerHandleIdent = postfix(copyNimTree(listenerHandleIdent), "*") + let exportedBrokerTypeIdent = postfix(copyNimTree(brokerTypeIdent), "*") + let bucketTypeIdent = ident(sanitized & "CtxBucket") + let findBucketIdxIdent = ident(sanitized & "FindBucketIdx") + let getOrCreateBucketIdxIdent = ident(sanitized & "GetOrCreateBucketIdx") + let accessProcIdent = ident("access" & sanitized & "Broker") + let globalVarIdent = ident("g" & sanitized & 
"Broker") + let listenImplIdent = ident("register" & sanitized & "Listener") + let dropListenerImplIdent = ident("drop" & sanitized & "Listener") + let dropAllListenersImplIdent = ident("dropAll" & sanitized & "Listeners") + let emitImplIdent = ident("emit" & sanitized & "Value") + let listenerTaskIdent = ident("notify" & sanitized & "Listener") + + result = newStmtList() + + result.add( + quote do: + type + `exportedTypeIdent` = `objectDef` + `exportedListenerHandleIdent` = object + id*: uint64 + + `exportedHandlerProcIdent` = + proc(event: `typeIdent`): Future[void] {.async: (raises: []), gcsafe.} + `bucketTypeIdent` = object + brokerCtx: BrokerContext + listeners: Table[uint64, `handlerProcIdent`] + nextId: uint64 + + `exportedBrokerTypeIdent` = ref object + buckets: seq[`bucketTypeIdent`] + + ) + + result.add( + quote do: + var `globalVarIdent` {.threadvar.}: `brokerTypeIdent` + ) + + result.add( + quote do: + proc `accessProcIdent`(): `brokerTypeIdent` = + if `globalVarIdent`.isNil(): + new(`globalVarIdent`) + `globalVarIdent`.buckets = + @[ + `bucketTypeIdent`( + brokerCtx: DefaultBrokerContext, + listeners: initTable[uint64, `handlerProcIdent`](), + nextId: 1'u64, + ) + ] + `globalVarIdent` + + ) + + result.add( + quote do: + proc `findBucketIdxIdent`( + broker: `brokerTypeIdent`, brokerCtx: BrokerContext + ): int = + if brokerCtx == DefaultBrokerContext: + return 0 + for i in 1 ..< broker.buckets.len: + if broker.buckets[i].brokerCtx == brokerCtx: + return i + return -1 + + proc `getOrCreateBucketIdxIdent`( + broker: `brokerTypeIdent`, brokerCtx: BrokerContext + ): int = + let idx = `findBucketIdxIdent`(broker, brokerCtx) + if idx >= 0: + return idx + broker.buckets.add( + `bucketTypeIdent`( + brokerCtx: brokerCtx, + listeners: initTable[uint64, `handlerProcIdent`](), + nextId: 1'u64, + ) + ) + return broker.buckets.high + + proc `listenImplIdent`( + brokerCtx: BrokerContext, handler: `handlerProcIdent` + ): Result[`listenerHandleIdent`, string] = + if 
handler.isNil(): + return err("Must provide a non-nil event handler") + var broker = `accessProcIdent`() + + let bucketIdx = `getOrCreateBucketIdxIdent`(broker, brokerCtx) + if broker.buckets[bucketIdx].nextId == 0'u64: + broker.buckets[bucketIdx].nextId = 1'u64 + + if broker.buckets[bucketIdx].nextId == high(uint64): + error "Cannot add more listeners: ID space exhausted", + nextId = $broker.buckets[bucketIdx].nextId + return err("Cannot add more listeners, listener ID space exhausted") + + let newId = broker.buckets[bucketIdx].nextId + inc broker.buckets[bucketIdx].nextId + broker.buckets[bucketIdx].listeners[newId] = handler + return ok(`listenerHandleIdent`(id: newId)) + + ) + + result.add( + quote do: + proc `dropListenerImplIdent`( + brokerCtx: BrokerContext, handle: `listenerHandleIdent` + ) = + if handle.id == 0'u64: + return + var broker = `accessProcIdent`() + + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + + if broker.buckets[bucketIdx].listeners.len == 0: + return + broker.buckets[bucketIdx].listeners.del(handle.id) + if brokerCtx != DefaultBrokerContext and + broker.buckets[bucketIdx].listeners.len == 0: + broker.buckets.delete(bucketIdx) + + ) + + result.add( + quote do: + proc `dropAllListenersImplIdent`(brokerCtx: BrokerContext) = + var broker = `accessProcIdent`() + + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + if broker.buckets[bucketIdx].listeners.len > 0: + broker.buckets[bucketIdx].listeners.clear() + if brokerCtx != DefaultBrokerContext: + broker.buckets.delete(bucketIdx) + + ) + + result.add( + quote do: + proc listen*( + _: typedesc[`typeIdent`], handler: `handlerProcIdent` + ): Result[`listenerHandleIdent`, string] = + return `listenImplIdent`(DefaultBrokerContext, handler) + + proc listen*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handler: `handlerProcIdent`, + ): Result[`listenerHandleIdent`, string] = + return `listenImplIdent`(brokerCtx, 
handler) + + ) + + result.add( + quote do: + proc dropListener*(_: typedesc[`typeIdent`], handle: `listenerHandleIdent`) = + `dropListenerImplIdent`(DefaultBrokerContext, handle) + + proc dropListener*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handle: `listenerHandleIdent`, + ) = + `dropListenerImplIdent`(brokerCtx, handle) + + proc dropAllListeners*(_: typedesc[`typeIdent`]) = + `dropAllListenersImplIdent`(DefaultBrokerContext) + + proc dropAllListeners*(_: typedesc[`typeIdent`], brokerCtx: BrokerContext) = + `dropAllListenersImplIdent`(brokerCtx) + + ) + + result.add( + quote do: + proc `listenerTaskIdent`( + callback: `handlerProcIdent`, event: `typeIdent` + ) {.async: (raises: []), gcsafe.} = + if callback.isNil(): + return + try: + await callback(event) + except Exception: + error "Failed to execute event listener", error = getCurrentExceptionMsg() + + proc `emitImplIdent`( + brokerCtx: BrokerContext, event: `typeIdent` + ): Future[void] {.async: (raises: []), gcsafe.} = + when compiles(event.isNil()): + if event.isNil(): + error "Cannot emit uninitialized event object", eventType = `typeNameLit` + return + let broker = `accessProcIdent`() + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + # nothing to do as nobody is listening + return + if broker.buckets[bucketIdx].listeners.len == 0: + return + var callbacks: seq[`handlerProcIdent`] = @[] + for cb in broker.buckets[bucketIdx].listeners.values: + callbacks.add(cb) + for cb in callbacks: + asyncSpawn `listenerTaskIdent`(cb, event) + + proc emit*(event: `typeIdent`) = + asyncSpawn `emitImplIdent`(DefaultBrokerContext, event) + + proc emit*(_: typedesc[`typeIdent`], event: `typeIdent`) = + asyncSpawn `emitImplIdent`(DefaultBrokerContext, event) + + proc emit*( + _: typedesc[`typeIdent`], brokerCtx: BrokerContext, event: `typeIdent` + ) = + asyncSpawn `emitImplIdent`(brokerCtx, event) + + ) + + if hasInlineFields: + # Typedesc emit constructor overloads for inline 
object/ref object types. + var emitCtorParams = newTree(nnkFormalParams, newEmptyNode()) + let typedescParamType = + newTree(nnkBracketExpr, ident("typedesc"), copyNimTree(typeIdent)) + emitCtorParams.add( + newTree(nnkIdentDefs, ident("_"), typedescParamType, newEmptyNode()) + ) + for i in 0 ..< fieldNames.len: + emitCtorParams.add( + newTree( + nnkIdentDefs, + copyNimTree(fieldNames[i]), + copyNimTree(fieldTypes[i]), + newEmptyNode(), + ) + ) + + var emitCtorExpr = newTree(nnkObjConstr, copyNimTree(typeIdent)) + for i in 0 ..< fieldNames.len: + emitCtorExpr.add( + newTree( + nnkExprColonExpr, copyNimTree(fieldNames[i]), copyNimTree(fieldNames[i]) + ) + ) + + let emitCtorCallDefault = + newCall(copyNimTree(emitImplIdent), ident("DefaultBrokerContext"), emitCtorExpr) + let emitCtorBodyDefault = quote: + asyncSpawn `emitCtorCallDefault` + + let typedescEmitProcDefault = newTree( + nnkProcDef, + postfix(ident("emit"), "*"), + newEmptyNode(), + newEmptyNode(), + emitCtorParams, + newEmptyNode(), + newEmptyNode(), + emitCtorBodyDefault, + ) + result.add(typedescEmitProcDefault) + + var emitCtorParamsCtx = newTree(nnkFormalParams, newEmptyNode()) + emitCtorParamsCtx.add( + newTree(nnkIdentDefs, ident("_"), typedescParamType, newEmptyNode()) + ) + emitCtorParamsCtx.add( + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()) + ) + for i in 0 ..< fieldNames.len: + emitCtorParamsCtx.add( + newTree( + nnkIdentDefs, + copyNimTree(fieldNames[i]), + copyNimTree(fieldTypes[i]), + newEmptyNode(), + ) + ) + + let emitCtorCallCtx = + newCall(copyNimTree(emitImplIdent), ident("brokerCtx"), copyNimTree(emitCtorExpr)) + let emitCtorBodyCtx = quote: + asyncSpawn `emitCtorCallCtx` + + let typedescEmitProcCtx = newTree( + nnkProcDef, + postfix(ident("emit"), "*"), + newEmptyNode(), + newEmptyNode(), + emitCtorParamsCtx, + newEmptyNode(), + newEmptyNode(), + emitCtorBodyCtx, + ) + result.add(typedescEmitProcCtx) + + when defined(eventBrokerDebug): + echo 
result.repr diff --git a/waku/common/broker/helper/broker_utils.nim b/waku/common/broker/helper/broker_utils.nim new file mode 100644 index 000000000..90f2055d3 --- /dev/null +++ b/waku/common/broker/helper/broker_utils.nim @@ -0,0 +1,206 @@ +import std/macros + +type ParsedBrokerType* = object + ## Result of parsing the single `type` definition inside a broker macro body. + ## + ## - `typeIdent`: base identifier for the declared type name + ## - `objectDef`: exported type definition RHS (inline object fields exported; + ## non-object types wrapped in `distinct` unless already distinct) + ## - `isRefObject`: true only for inline `ref object` definitions + ## - `hasInlineFields`: true for inline `object` / `ref object` + ## - `fieldNames`/`fieldTypes`: populated only when `collectFieldInfo = true` + typeIdent*: NimNode + objectDef*: NimNode + isRefObject*: bool + hasInlineFields*: bool + fieldNames*: seq[NimNode] + fieldTypes*: seq[NimNode] + +proc sanitizeIdentName*(node: NimNode): string = + var raw = $node + var sanitizedName = newStringOfCap(raw.len) + for ch in raw: + case ch + of 'A' .. 'Z', 'a' .. 'z', '0' .. 
'9', '_': + sanitizedName.add(ch) + else: + sanitizedName.add('_') + sanitizedName + +proc ensureFieldDef*(node: NimNode) = + if node.kind != nnkIdentDefs or node.len < 3: + error("Expected field definition of the form `name: Type`", node) + let typeSlot = node.len - 2 + if node[typeSlot].kind == nnkEmpty: + error("Field `" & $node[0] & "` must declare a type", node) + +proc exportIdentNode*(node: NimNode): NimNode = + case node.kind + of nnkIdent: + postfix(copyNimTree(node), "*") + of nnkPostfix: + node + else: + error("Unsupported identifier form in field definition", node) + +proc baseTypeIdent*(defName: NimNode): NimNode = + case defName.kind + of nnkIdent: + defName + of nnkAccQuoted: + if defName.len != 1: + error("Unsupported quoted identifier", defName) + defName[0] + of nnkPostfix: + baseTypeIdent(defName[1]) + of nnkPragmaExpr: + baseTypeIdent(defName[0]) + else: + error("Unsupported type name in broker definition", defName) + +proc ensureDistinctType*(rhs: NimNode): NimNode = + ## For PODs / aliases / externally-defined types, wrap in `distinct` unless + ## it's already distinct. + if rhs.kind == nnkDistinctTy: + return copyNimTree(rhs) + newTree(nnkDistinctTy, copyNimTree(rhs)) + +proc cloneParams*(params: seq[NimNode]): seq[NimNode] = + ## Deep copy parameter definitions so they can be inserted in multiple places. + result = @[] + for param in params: + result.add(copyNimTree(param)) + +proc collectParamNames*(params: seq[NimNode]): seq[NimNode] = + ## Extract all identifier symbols declared across IdentDefs nodes. + result = @[] + for param in params: + assert param.kind == nnkIdentDefs + for i in 0 ..< param.len - 2: + let nameNode = param[i] + if nameNode.kind == nnkEmpty: + continue + result.add(ident($nameNode)) + +proc parseSingleTypeDef*( + body: NimNode, + macroName: string, + allowRefToNonObject = false, + collectFieldInfo = false, +): ParsedBrokerType = + ## Parses exactly one `type` definition from a broker macro body. 
+ ## + ## Supported RHS: + ## - inline `object` / `ref object` (fields are auto-exported) + ## - non-object types / aliases / externally-defined types (wrapped in `distinct`) + ## - optionally: `ref SomeType` when `allowRefToNonObject = true` + var typeIdent: NimNode = nil + var objectDef: NimNode = nil + var isRefObject = false + var hasInlineFields = false + var fieldNames: seq[NimNode] = @[] + var fieldTypes: seq[NimNode] = @[] + + for stmt in body: + if stmt.kind != nnkTypeSection: + continue + for def in stmt: + if def.kind != nnkTypeDef: + continue + if not typeIdent.isNil(): + error("Only one type may be declared inside " & macroName, def) + typeIdent = baseTypeIdent(def[0]) + let rhs = def[2] + + case rhs.kind + of nnkObjectTy: + let recList = rhs[2] + if recList.kind != nnkRecList: + error(macroName & " object must declare a standard field list", rhs) + var exportedRecList = newTree(nnkRecList) + for field in recList: + case field.kind + of nnkIdentDefs: + ensureFieldDef(field) + if collectFieldInfo: + let fieldTypeNode = field[field.len - 2] + for i in 0 ..< field.len - 2: + let baseFieldIdent = baseTypeIdent(field[i]) + fieldNames.add(copyNimTree(baseFieldIdent)) + fieldTypes.add(copyNimTree(fieldTypeNode)) + var cloned = copyNimTree(field) + for i in 0 ..< cloned.len - 2: + cloned[i] = exportIdentNode(cloned[i]) + exportedRecList.add(cloned) + of nnkEmpty: + discard + else: + error( + macroName & " object definition only supports simple field declarations", + field, + ) + objectDef = newTree( + nnkObjectTy, copyNimTree(rhs[0]), copyNimTree(rhs[1]), exportedRecList + ) + isRefObject = false + hasInlineFields = true + of nnkRefTy: + if rhs.len != 1: + error(macroName & " ref type must have a single base", rhs) + if rhs[0].kind == nnkObjectTy: + let obj = rhs[0] + let recList = obj[2] + if recList.kind != nnkRecList: + error(macroName & " object must declare a standard field list", obj) + var exportedRecList = newTree(nnkRecList) + for field in recList: + 
case field.kind + of nnkIdentDefs: + ensureFieldDef(field) + if collectFieldInfo: + let fieldTypeNode = field[field.len - 2] + for i in 0 ..< field.len - 2: + let baseFieldIdent = baseTypeIdent(field[i]) + fieldNames.add(copyNimTree(baseFieldIdent)) + fieldTypes.add(copyNimTree(fieldTypeNode)) + var cloned = copyNimTree(field) + for i in 0 ..< cloned.len - 2: + cloned[i] = exportIdentNode(cloned[i]) + exportedRecList.add(cloned) + of nnkEmpty: + discard + else: + error( + macroName & " object definition only supports simple field declarations", + field, + ) + let exportedObjectType = newTree( + nnkObjectTy, copyNimTree(obj[0]), copyNimTree(obj[1]), exportedRecList + ) + objectDef = newTree(nnkRefTy, exportedObjectType) + isRefObject = true + hasInlineFields = true + elif allowRefToNonObject: + ## `ref SomeType` (SomeType can be defined elsewhere) + objectDef = ensureDistinctType(rhs) + isRefObject = false + hasInlineFields = false + else: + error(macroName & " ref object must wrap a concrete object definition", rhs) + else: + ## Non-object type / alias. + objectDef = ensureDistinctType(rhs) + isRefObject = false + hasInlineFields = false + + if typeIdent.isNil(): + error(macroName & " body must declare exactly one type", body) + + result = ParsedBrokerType( + typeIdent: typeIdent, + objectDef: objectDef, + isRefObject: isRefObject, + hasInlineFields: hasInlineFields, + fieldNames: fieldNames, + fieldTypes: fieldTypes, + ) diff --git a/waku/common/broker/multi_request_broker.nim b/waku/common/broker/multi_request_broker.nim new file mode 100644 index 000000000..2baa19940 --- /dev/null +++ b/waku/common/broker/multi_request_broker.nim @@ -0,0 +1,743 @@ +## MultiRequestBroker +## -------------------- +## MultiRequestBroker represents a proactive decoupling pattern, that +## allows defining request-response style interactions between modules without +## need for direct dependencies in between. 
+## Worth considering using it for use cases where you need to collect data from multiple providers. +## +## Generates a standalone, type-safe request broker for the declared type. +## The macro exports the value type itself plus a broker companion that manages +## providers via thread-local storage. +## +## Unlike `RequestBroker`, every call to `request` fan-outs to every registered +## provider and returns all collected responses. +## The request succeeds only if all providers succeed, otherwise it fails. +## +## Type definitions: +## - Inline `object` / `ref object` definitions are supported. +## - Native types, aliases, and externally-defined types are also supported. +## In that case, MultiRequestBroker will automatically wrap the declared RHS +## type in `distinct` unless you already used `distinct`. +## This keeps request types unique even when multiple brokers share the same +## underlying base type. +## +## Default vs. context aware use: +## Every generated broker is a thread-local global instance. +## Sometimes you want multiple independent provider sets for the same request +## type within the same thread (e.g. multiple components). For that, you can use +## context-aware MultiRequestBroker. +## +## Context awareness is supported through the `BrokerContext` argument for +## `setProvider`, `request`, `removeProvider`, and `clearProviders`. +## Provider stores are kept separate per broker context. +## +## Default broker context is defined as `DefaultBrokerContext`. If you don't +## need context awareness, you can keep using the interfaces without the context +## argument, which operate on `DefaultBrokerContext`. +## +## Usage: +## +## Declare collectable request data type inside a `MultiRequestBroker` macro, add any number of fields: +## ```nim +## MultiRequestBroker: +## type TypeName = object +## field1*: Type1 +## field2*: Type2 +## +## ## Define the request and provider signature, that is enforced at compile time. 
+## proc signature*(): Future[Result[TypeName, string]] {.async: (raises: []).} +## +## ## Also possible to define signature with arbitrary input arguments. +## proc signature*(arg1: ArgType, arg2: AnotherArgType): Future[Result[TypeName, string]] {.async: (raises: []).} +## +## ``` +## +## You can register a request processor (provider) anywhere without the need to +## know who will request. +## Register provider functions with `TypeName.setProvider(...)`. +## Providers are async procs or lambdas that return `Future[Result[TypeName, string]]`. +## `setProvider` returns a handle (or an error) that can later be used to remove +## the provider. + +## Requests can be made from anywhere with no direct dependency on the provider(s) +## by calling `TypeName.request()` (with arguments respecting the declared signature). +## This will asynchronously call all registered providers and return the collected +## responses as `Future[Result[seq[TypeName], string]]`. +## +## Whenever you don't want to process requests anymore (or your object instance that provides the request goes out of scope), +## you can remove it from the broker with `TypeName.removeProvider(handle)`. +## Alternatively, you can remove all registered providers through `TypeName.clearProviders()`. +## +## Example: +## ```nim +## MultiRequestBroker: +## type Greeting = object +## text*: string +## +## ## Define the request and provider signature, that is enforced at compile time. +## proc signature*(): Future[Result[Greeting, string]] {.async: (raises: []).} +## +## ## Also possible to define signature with arbitrary input arguments. +## proc signature*(lang: string): Future[Result[Greeting, string]] {.async: (raises: []).} +## +## ... 
+## let handle = Greeting.setProvider( +## proc(): Future[Result[Greeting, string]] {.async: (raises: []).} = +## ok(Greeting(text: "hello")) +## ) +## +## let anotherHandle = Greeting.setProvider( +## proc(): Future[Result[Greeting, string]] {.async: (raises: []).} = +## ok(Greeting(text: "szia")) +## ) +## +## let responses = (await Greeting.request()).valueOr(@[Greeting(text: "default")]) +## +## echo responses.len +## Greeting.clearProviders() +## ``` +## If no `signature` proc is declared, a zero-argument form is generated +## automatically, so the caller only needs to provide the type definition. + +import std/[macros, strutils, tables, sugar] +import chronos +import results +import ./helper/broker_utils +import ./broker_context + +export results, chronos, broker_context + +proc isReturnTypeValid(returnType, typeIdent: NimNode): bool = + ## Accept Future[Result[TypeIdent, string]] as the contract. + if returnType.kind != nnkBracketExpr or returnType.len != 2: + return false + if returnType[0].kind != nnkIdent or not returnType[0].eqIdent("Future"): + return false + let inner = returnType[1] + if inner.kind != nnkBracketExpr or inner.len != 3: + return false + if inner[0].kind != nnkIdent or not inner[0].eqIdent("Result"): + return false + if inner[1].kind != nnkIdent or not inner[1].eqIdent($typeIdent): + return false + inner[2].kind == nnkIdent and inner[2].eqIdent("string") + +proc makeProcType(returnType: NimNode, params: seq[NimNode]): NimNode = + var formal = newTree(nnkFormalParams) + formal.add(returnType) + for param in params: + formal.add(param) + + let pragmas = quote: + {.async.} + + newTree(nnkProcTy, formal, pragmas) + +macro MultiRequestBroker*(body: untyped): untyped = + when defined(requestBrokerDebug): + echo body.treeRepr + let parsed = parseSingleTypeDef(body, "MultiRequestBroker") + let typeIdent = parsed.typeIdent + let objectDef = parsed.objectDef + let isRefObject = parsed.isRefObject + + when defined(requestBrokerDebug): + echo 
"MultiRequestBroker generating type: ", $typeIdent + + let exportedTypeIdent = postfix(copyNimTree(typeIdent), "*") + let sanitized = sanitizeIdentName(typeIdent) + let typeNameLit = newLit($typeIdent) + let isRefObjectLit = newLit(isRefObject) + let uint64Ident = ident("uint64") + let providerKindIdent = ident(sanitized & "ProviderKind") + let providerHandleIdent = ident(sanitized & "ProviderHandle") + let exportedProviderHandleIdent = postfix(copyNimTree(providerHandleIdent), "*") + let bucketTypeIdent = ident(sanitized & "CtxBucket") + let findBucketIdxIdent = ident(sanitized & "FindBucketIdx") + let getOrCreateBucketIdxIdent = ident(sanitized & "GetOrCreateBucketIdx") + let zeroKindIdent = ident("pk" & sanitized & "NoArgs") + let argKindIdent = ident("pk" & sanitized & "WithArgs") + var zeroArgSig: NimNode = nil + var zeroArgProviderName: NimNode = nil + var zeroArgFieldName: NimNode = nil + var argSig: NimNode = nil + var argParams: seq[NimNode] = @[] + var argProviderName: NimNode = nil + var argFieldName: NimNode = nil + + for stmt in body: + case stmt.kind + of nnkProcDef: + let procName = stmt[0] + let procNameIdent = + case procName.kind + of nnkIdent: + procName + of nnkPostfix: + procName[1] + else: + procName + let procNameStr = $procNameIdent + if not procNameStr.startsWith("signature"): + error("Signature proc names must start with `signature`", procName) + let params = stmt.params + if params.len == 0: + error("Signature must declare a return type", stmt) + let returnType = params[0] + if not isReturnTypeValid(returnType, typeIdent): + error( + "Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt + ) + let paramCount = params.len - 1 + if paramCount == 0: + if zeroArgSig != nil: + error("Only one zero-argument signature is allowed", stmt) + zeroArgSig = stmt + zeroArgProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderNoArgs") + zeroArgFieldName = ident("providerNoArgs") + elif paramCount >= 1: + if argSig != nil: + 
error("Only one argument-based signature is allowed", stmt) + argSig = stmt + argParams = @[] + for idx in 1 ..< params.len: + let paramDef = params[idx] + if paramDef.kind != nnkIdentDefs: + error( + "Signature parameter must be a standard identifier declaration", paramDef + ) + let paramTypeNode = paramDef[paramDef.len - 2] + if paramTypeNode.kind == nnkEmpty: + error("Signature parameter must declare a type", paramDef) + var hasName = false + for i in 0 ..< paramDef.len - 2: + if paramDef[i].kind != nnkEmpty: + hasName = true + if not hasName: + error("Signature parameter must declare a name", paramDef) + argParams.add(copyNimTree(paramDef)) + argProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderWithArgs") + argFieldName = ident("providerWithArgs") + of nnkTypeSection, nnkEmpty: + discard + else: + error("Unsupported statement inside MultiRequestBroker definition", stmt) + + if zeroArgSig.isNil() and argSig.isNil(): + zeroArgSig = newEmptyNode() + zeroArgProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderNoArgs") + zeroArgFieldName = ident("providerNoArgs") + + var typeSection = newTree(nnkTypeSection) + typeSection.add(newTree(nnkTypeDef, exportedTypeIdent, newEmptyNode(), objectDef)) + + var kindEnum = newTree(nnkEnumTy, newEmptyNode()) + if not zeroArgSig.isNil(): + kindEnum.add(zeroKindIdent) + if not argSig.isNil(): + kindEnum.add(argKindIdent) + typeSection.add(newTree(nnkTypeDef, providerKindIdent, newEmptyNode(), kindEnum)) + + var handleRecList = newTree(nnkRecList) + handleRecList.add(newTree(nnkIdentDefs, ident("id"), uint64Ident, newEmptyNode())) + handleRecList.add( + newTree(nnkIdentDefs, ident("kind"), providerKindIdent, newEmptyNode()) + ) + typeSection.add( + newTree( + nnkTypeDef, + exportedProviderHandleIdent, + newEmptyNode(), + newTree(nnkObjectTy, newEmptyNode(), newEmptyNode(), handleRecList), + ) + ) + + let returnType = quote: + Future[Result[`typeIdent`, string]] + + if not zeroArgSig.isNil(): + let procType = 
makeProcType(returnType, @[]) + typeSection.add(newTree(nnkTypeDef, zeroArgProviderName, newEmptyNode(), procType)) + if not argSig.isNil(): + let procType = makeProcType(returnType, cloneParams(argParams)) + typeSection.add(newTree(nnkTypeDef, argProviderName, newEmptyNode(), procType)) + + var bucketRecList = newTree(nnkRecList) + bucketRecList.add( + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()) + ) + if not zeroArgSig.isNil(): + bucketRecList.add( + newTree( + nnkIdentDefs, + zeroArgFieldName, + newTree(nnkBracketExpr, ident("seq"), zeroArgProviderName), + newEmptyNode(), + ) + ) + if not argSig.isNil(): + bucketRecList.add( + newTree( + nnkIdentDefs, + argFieldName, + newTree(nnkBracketExpr, ident("seq"), argProviderName), + newEmptyNode(), + ) + ) + typeSection.add( + newTree( + nnkTypeDef, + bucketTypeIdent, + newEmptyNode(), + newTree(nnkObjectTy, newEmptyNode(), newEmptyNode(), bucketRecList), + ) + ) + + var brokerRecList = newTree(nnkRecList) + brokerRecList.add( + newTree( + nnkIdentDefs, + ident("buckets"), + newTree(nnkBracketExpr, ident("seq"), bucketTypeIdent), + newEmptyNode(), + ) + ) + let brokerTypeIdent = ident(sanitizeIdentName(typeIdent) & "Broker") + typeSection.add( + newTree( + nnkTypeDef, + brokerTypeIdent, + newEmptyNode(), + newTree( + nnkRefTy, newTree(nnkObjectTy, newEmptyNode(), newEmptyNode(), brokerRecList) + ), + ) + ) + result = newStmtList() + result.add(typeSection) + + let globalVarIdent = ident("g" & sanitizeIdentName(typeIdent) & "Broker") + let accessProcIdent = ident("access" & sanitizeIdentName(typeIdent) & "Broker") + result.add( + quote do: + var `globalVarIdent` {.threadvar.}: `brokerTypeIdent` + + proc `findBucketIdxIdent`( + broker: `brokerTypeIdent`, brokerCtx: BrokerContext + ): int = + if brokerCtx == DefaultBrokerContext: + return 0 + for i in 1 ..< broker.buckets.len: + if broker.buckets[i].brokerCtx == brokerCtx: + return i + return -1 + + proc `getOrCreateBucketIdxIdent`( + 
broker: `brokerTypeIdent`, brokerCtx: BrokerContext + ): int = + let idx = `findBucketIdxIdent`(broker, brokerCtx) + if idx >= 0: + return idx + broker.buckets.add(`bucketTypeIdent`(brokerCtx: brokerCtx)) + return broker.buckets.high + + proc `accessProcIdent`(): `brokerTypeIdent` = + if `globalVarIdent`.isNil(): + new(`globalVarIdent`) + `globalVarIdent`.buckets = + @[`bucketTypeIdent`(brokerCtx: DefaultBrokerContext)] + return `globalVarIdent` + + ) + + var clearBody = newStmtList() + if not zeroArgSig.isNil(): + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handler: `zeroArgProviderName`, + ): Result[`providerHandleIdent`, string] = + if handler.isNil(): + return err("Provider handler must be provided") + let broker = `accessProcIdent`() + let bucketIdx = `getOrCreateBucketIdxIdent`(broker, brokerCtx) + for i, existing in broker.buckets[bucketIdx].`zeroArgFieldName`: + if not existing.isNil() and existing == handler: + return ok(`providerHandleIdent`(id: uint64(i + 1), kind: `zeroKindIdent`)) + broker.buckets[bucketIdx].`zeroArgFieldName`.add(handler) + return ok( + `providerHandleIdent`( + id: uint64(broker.buckets[bucketIdx].`zeroArgFieldName`.len), + kind: `zeroKindIdent`, + ) + ) + + proc setProvider*( + _: typedesc[`typeIdent`], handler: `zeroArgProviderName` + ): Result[`providerHandleIdent`, string] = + return setProvider(`typeIdent`, DefaultBrokerContext, handler) + + ) + result.add( + quote do: + proc request*( + _: typedesc[`typeIdent`], brokerCtx: BrokerContext + ): Future[Result[seq[`typeIdent`], string]] {.async: (raises: []), gcsafe.} = + var aggregated: seq[`typeIdent`] = @[] + let broker = `accessProcIdent`() + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return ok(aggregated) + let providers = broker.buckets[bucketIdx].`zeroArgFieldName` + if providers.len == 0: + return ok(aggregated) + # var providersFut: seq[Future[Result[`typeIdent`, string]]] = 
collect: + var providersFut = collect(newSeq): + for provider in providers: + if provider.isNil(): + continue + provider() + + let catchable = catch: + await allFinished(providersFut) + + catchable.isOkOr: + return err("Some provider(s) failed:" & error.msg) + + for fut in catchable.get(): + if fut.failed(): + return err("Some provider(s) failed:" & fut.error.msg) + elif fut.finished(): + let providerResult = fut.value() + if providerResult.isOk: + let providerValue = providerResult.get() + when `isRefObjectLit`: + if providerValue.isNil(): + return err( + "MultiRequestBroker(" & `typeNameLit` & + "): provider returned nil result" + ) + aggregated.add(providerValue) + else: + return err("Some provider(s) failed:" & providerResult.error) + + return ok(aggregated) + + proc request*( + _: typedesc[`typeIdent`] + ): Future[Result[seq[`typeIdent`], string]] = + return request(`typeIdent`, DefaultBrokerContext) + + ) + if not argSig.isNil(): + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handler: `argProviderName`, + ): Result[`providerHandleIdent`, string] = + if handler.isNil(): + return err("Provider handler must be provided") + let broker = `accessProcIdent`() + let bucketIdx = `getOrCreateBucketIdxIdent`(broker, brokerCtx) + for i, existing in broker.buckets[bucketIdx].`argFieldName`: + if not existing.isNil() and existing == handler: + return ok(`providerHandleIdent`(id: uint64(i + 1), kind: `argKindIdent`)) + broker.buckets[bucketIdx].`argFieldName`.add(handler) + return ok( + `providerHandleIdent`( + id: uint64(broker.buckets[bucketIdx].`argFieldName`.len), + kind: `argKindIdent`, + ) + ) + + proc setProvider*( + _: typedesc[`typeIdent`], handler: `argProviderName` + ): Result[`providerHandleIdent`, string] = + return setProvider(`typeIdent`, DefaultBrokerContext, handler) + + ) + let requestParamDefs = cloneParams(argParams) + let argNameIdents = collectParamNames(requestParamDefs) + let providerSym = 
genSym(nskLet, "providerVal") + var providerCall = newCall(providerSym) + for argName in argNameIdents: + providerCall.add(argName) + var formalParams = newTree(nnkFormalParams) + formalParams.add( + quote do: + Future[Result[seq[`typeIdent`], string]] + ) + formalParams.add( + newTree( + nnkIdentDefs, + ident("_"), + newTree(nnkBracketExpr, ident("typedesc"), copyNimTree(typeIdent)), + newEmptyNode(), + ) + ) + formalParams.add( + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()) + ) + for paramDef in requestParamDefs: + formalParams.add(paramDef) + let requestPragmas = quote: + {.async: (raises: []), gcsafe.} + let requestBody = quote: + var aggregated: seq[`typeIdent`] = @[] + let broker = `accessProcIdent`() + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return ok(aggregated) + let providers = broker.buckets[bucketIdx].`argFieldName` + if providers.len == 0: + return ok(aggregated) + var providersFut = collect(newSeq): + for provider in providers: + if provider.isNil(): + continue + let `providerSym` = provider + `providerCall` + let catchable = catch: + await allFinished(providersFut) + catchable.isOkOr: + return err("Some provider(s) failed:" & error.msg) + for fut in catchable.get(): + if fut.failed(): + return err("Some provider(s) failed:" & fut.error.msg) + elif fut.finished(): + let providerResult = fut.value() + if providerResult.isOk: + let providerValue = providerResult.get() + when `isRefObjectLit`: + if providerValue.isNil(): + return err( + "MultiRequestBroker(" & `typeNameLit` & + "): provider returned nil result" + ) + aggregated.add(providerValue) + else: + return err("Some provider(s) failed:" & providerResult.error) + return ok(aggregated) + + result.add( + newTree( + nnkProcDef, + postfix(ident("request"), "*"), + newEmptyNode(), + newEmptyNode(), + formalParams, + requestPragmas, + newEmptyNode(), + requestBody, + ) + ) + + # Backward-compatible default-context overload (no 
brokerCtx parameter). + var formalParamsDefault = newTree(nnkFormalParams) + formalParamsDefault.add( + quote do: + Future[Result[seq[`typeIdent`], string]] + ) + formalParamsDefault.add( + newTree( + nnkIdentDefs, + ident("_"), + newTree(nnkBracketExpr, ident("typedesc"), copyNimTree(typeIdent)), + newEmptyNode(), + ) + ) + for paramDef in requestParamDefs: + formalParamsDefault.add(copyNimTree(paramDef)) + + var wrapperCall = newCall(ident("request")) + wrapperCall.add(copyNimTree(typeIdent)) + wrapperCall.add(ident("DefaultBrokerContext")) + for argName in argNameIdents: + wrapperCall.add(copyNimTree(argName)) + + result.add( + newTree( + nnkProcDef, + postfix(ident("request"), "*"), + newEmptyNode(), + newEmptyNode(), + formalParamsDefault, + newEmptyNode(), + newEmptyNode(), + newStmtList(newTree(nnkReturnStmt, wrapperCall)), + ) + ) + let removeHandleCtxSym = genSym(nskParam, "handle") + let removeHandleDefaultSym = genSym(nskParam, "handle") + + when true: + # Generate clearProviders / removeProvider with macro-time knowledge about which + # provider lists exist (zero-arg and/or arg providers). 
+ if not zeroArgSig.isNil() and not argSig.isNil(): + result.add( + quote do: + proc clearProviders*(_: typedesc[`typeIdent`], brokerCtx: BrokerContext) = + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + broker.buckets[bucketIdx].`zeroArgFieldName`.setLen(0) + broker.buckets[bucketIdx].`argFieldName`.setLen(0) + if brokerCtx != DefaultBrokerContext: + broker.buckets.delete(bucketIdx) + + proc clearProviders*(_: typedesc[`typeIdent`]) = + clearProviders(`typeIdent`, DefaultBrokerContext) + + proc removeProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + `removeHandleCtxSym`: `providerHandleIdent`, + ) = + if `removeHandleCtxSym`.id == 0'u64: + return + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + + if `removeHandleCtxSym`.kind == `zeroKindIdent`: + let idx = int(`removeHandleCtxSym`.id) - 1 + if idx >= 0 and idx < broker.buckets[bucketIdx].`zeroArgFieldName`.len: + broker.buckets[bucketIdx].`zeroArgFieldName`[idx] = nil + elif `removeHandleCtxSym`.kind == `argKindIdent`: + let idx = int(`removeHandleCtxSym`.id) - 1 + if idx >= 0 and idx < broker.buckets[bucketIdx].`argFieldName`.len: + broker.buckets[bucketIdx].`argFieldName`[idx] = nil + + if brokerCtx != DefaultBrokerContext: + var hasAny = false + for p in broker.buckets[bucketIdx].`zeroArgFieldName`: + if not p.isNil(): + hasAny = true + break + if not hasAny: + for p in broker.buckets[bucketIdx].`argFieldName`: + if not p.isNil(): + hasAny = true + break + if not hasAny: + broker.buckets.delete(bucketIdx) + + proc removeProvider*( + _: typedesc[`typeIdent`], `removeHandleDefaultSym`: `providerHandleIdent` + ) = + removeProvider(`typeIdent`, DefaultBrokerContext, `removeHandleDefaultSym`) + + ) + elif not zeroArgSig.isNil(): + result.add( + quote do: + proc clearProviders*(_: 
typedesc[`typeIdent`], brokerCtx: BrokerContext) = + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + broker.buckets[bucketIdx].`zeroArgFieldName`.setLen(0) + if brokerCtx != DefaultBrokerContext: + broker.buckets.delete(bucketIdx) + + proc clearProviders*(_: typedesc[`typeIdent`]) = + clearProviders(`typeIdent`, DefaultBrokerContext) + + proc removeProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + `removeHandleCtxSym`: `providerHandleIdent`, + ) = + if `removeHandleCtxSym`.id == 0'u64: + return + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + if `removeHandleCtxSym`.kind != `zeroKindIdent`: + return + let idx = int(`removeHandleCtxSym`.id) - 1 + if idx >= 0 and idx < broker.buckets[bucketIdx].`zeroArgFieldName`.len: + broker.buckets[bucketIdx].`zeroArgFieldName`[idx] = nil + if brokerCtx != DefaultBrokerContext: + var hasAny = false + for p in broker.buckets[bucketIdx].`zeroArgFieldName`: + if not p.isNil(): + hasAny = true + break + if not hasAny: + broker.buckets.delete(bucketIdx) + + proc removeProvider*( + _: typedesc[`typeIdent`], `removeHandleDefaultSym`: `providerHandleIdent` + ) = + removeProvider(`typeIdent`, DefaultBrokerContext, `removeHandleDefaultSym`) + + ) + else: + result.add( + quote do: + proc clearProviders*(_: typedesc[`typeIdent`], brokerCtx: BrokerContext) = + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + broker.buckets[bucketIdx].`argFieldName`.setLen(0) + if brokerCtx != DefaultBrokerContext: + broker.buckets.delete(bucketIdx) + + proc clearProviders*(_: typedesc[`typeIdent`]) = + clearProviders(`typeIdent`, DefaultBrokerContext) + + proc removeProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + 
`removeHandleCtxSym`: `providerHandleIdent`, + ) = + if `removeHandleCtxSym`.id == 0'u64: + return + let broker = `accessProcIdent`() + if broker.isNil(): + return + let bucketIdx = `findBucketIdxIdent`(broker, brokerCtx) + if bucketIdx < 0: + return + if `removeHandleCtxSym`.kind != `argKindIdent`: + return + let idx = int(`removeHandleCtxSym`.id) - 1 + if idx >= 0 and idx < broker.buckets[bucketIdx].`argFieldName`.len: + broker.buckets[bucketIdx].`argFieldName`[idx] = nil + if brokerCtx != DefaultBrokerContext: + var hasAny = false + for p in broker.buckets[bucketIdx].`argFieldName`: + if not p.isNil(): + hasAny = true + break + if not hasAny: + broker.buckets.delete(bucketIdx) + + proc removeProvider*( + _: typedesc[`typeIdent`], `removeHandleDefaultSym`: `providerHandleIdent` + ) = + removeProvider(`typeIdent`, DefaultBrokerContext, `removeHandleDefaultSym`) + + ) + + when defined(requestBrokerDebug): + echo result.repr diff --git a/waku/common/broker/request_broker.nim b/waku/common/broker/request_broker.nim new file mode 100644 index 000000000..46f7d7d16 --- /dev/null +++ b/waku/common/broker/request_broker.nim @@ -0,0 +1,841 @@ +## RequestBroker +## -------------------- +## RequestBroker represents a proactive decoupling pattern, that +## allows defining request-response style interactions between modules without +## need for direct dependencies in between. +## Worth considering using it in a single provider, many requester scenario. +## +## Provides a declarative way to define an immutable value type together with a +## thread-local broker that can register an asynchronous or synchronous provider, +## dispatch typed requests and clear provider. +## +## For consideration use `sync` mode RequestBroker when you need to provide simple value(s) +## where there is no long-running async operation involved. +## Typically it act as a accessor for the local state of generic setting. 
+## +## `async` mode is better to be used when you request date that may involve some long IO operation +## or action. +## +## Default vs. context aware use: +## Every generated broker is a thread-local global instance. This means each RequestBroker enables decoupled +## data exchange threadwise. Sometimes we use brokers inside a context - like inside a component that has many modules or subsystems. +## In case you would instantiate multiple such components in a single thread, and each component must has its own provider for the same RequestBroker type, +## in order to avoid provider collision, you can use context aware RequestBroker. +## Context awareness is supported through the `BrokerContext` argument for `setProvider`, `request`, `clearProvider` interfaces. +## Suce use requires generating a new unique `BrokerContext` value per component instance, and spread it to all modules using the brokers. +## Example, store the `BrokerContext` as a field inside the top level component instance, and spread around at initialization of the subcomponents.. +## +## Default broker context is defined as `DefaultBrokerContext` constant. But if you don't need context awareness, you can use the +## interfaces without context argument. +## +## Usage: +## Declare your desired request type inside a `RequestBroker` macro, add any number of fields. +## Define the provider signature, that is enforced at compile time. +## +## ```nim +## RequestBroker: +## type TypeName = object +## field1*: FieldType +## field2*: AnotherFieldType +## +## proc signature*(): Future[Result[TypeName, string]] +## ## Also possible to define signature with arbitrary input arguments. 
+## proc signature*(arg1: ArgType, arg2: AnotherArgType): Future[Result[TypeName, string]] +## +## ``` +## +## Sync mode (no `async` / `Future`) can be generated with: +## +## ```nim +## RequestBroker(sync): +## type TypeName = object +## field1*: FieldType +## +## proc signature*(): Result[TypeName, string] +## proc signature*(arg1: ArgType): Result[TypeName, string] +## ``` +## +## Note: When the request type is declared as a native type / alias / externally-defined +## type (i.e. not an inline `object` / `ref object` definition), RequestBroker +## will wrap it in `distinct` automatically unless you already used `distinct`. +## This avoids overload ambiguity when multiple brokers share the same +## underlying base type (Nim overload resolution does not consider return type). +## +## This means that for non-object request types you typically: +## - construct values with an explicit cast/constructor, e.g. `MyType("x")` +## - unwrap with a cast when needed, e.g. `string(myVal)` or `BaseType(myVal)` +## +## Example (native response type): +## ```nim +## RequestBroker(sync): +## type MyCount = int # exported as: `distinct int` +## +## MyCount.setProvider(proc(): Result[MyCount, string] = ok(MyCount(42))) +## let res = MyCount.request() +## if res.isOk(): +## let raw = int(res.get()) +## ``` +## +## Example (externally-defined type): +## ```nim +## type External = object +## label*: string +## +## RequestBroker: +## type MyExternal = External # exported as: `distinct External` +## +## MyExternal.setProvider( +## proc(): Future[Result[MyExternal, string]] {.async.} = +## ok(MyExternal(External(label: "hi"))) +## ) +## let res = await MyExternal.request() +## if res.isOk(): +## let base = External(res.get()) +## echo base.label +## ``` +## The 'TypeName' object defines the requestable data (but also can be seen as request for action with return value). +## The 'signature' proc defines the provider(s) signature, that is enforced at compile time. 
+## One signature can be with no arguments, another with any number of arguments - where the input arguments are +## not related to the request type - but alternative inputs for the request to be processed. +## +## After this, you can register a provider anywhere in your code with +## `TypeName.setProvider(...)`, which returns error if already having a provider. +## Providers are async procs/lambdas in default mode and sync procs in sync mode. +## +## Providers are stored as a broker-context keyed list: +## - the default provider is always stored at index 0 (reserved broker context: 0) +## - additional providers can be registered under arbitrary non-zero broker contexts +## +## The original `setProvider(handler)` / `request(...)` APIs continue to operate +## on the default provider (broker context 0) for backward compatibility. +## +## Requests can be made from anywhere with no direct dependency on the provider by +## calling `TypeName.request()` - with arguments respecting the signature(s). +## In async mode, this returns a Future[Result[TypeName, string]]. In sync mode, it returns Result[TypeName, string]. +## +## Whenever you no want to process requests (or your object instance that provides the request goes out of scope), +## you can remove it from the broker with `TypeName.clearProvider()`. +## +## +## Example: +## ```nim +## RequestBroker: +## type Greeting = object +## text*: string +## +## ## Define the request and provider signature, that is enforced at compile time. +## proc signature*(): Future[Result[Greeting, string]] {.async.} +## +## ## Also possible to define signature with arbitrary input arguments. +## proc signature*(lang: string): Future[Result[Greeting, string]] {.async.} +## +## ... +## Greeting.setProvider( +## proc(): Future[Result[Greeting, string]] {.async.} = +## ok(Greeting(text: "hello")) +## ) +## let res = await Greeting.request() +## +## +## ... +## # using native type as response for a synchronous request. 
+## RequestBroker(sync): +## type NeedThatInfo = string +## +##... +## NeedThatInfo.setProvider( +## proc(): Result[NeedThatInfo, string] = +## ok("this is the info you wanted") +## ) +## let res = NeedThatInfo.request().valueOr: +## echo "not ok due to: " & error +## NeedThatInfo(":-(") +## +## echo string(res) +## ``` +## If no `signature` proc is declared, a zero-argument form is generated +## automatically, so the caller only needs to provide the type definition. + +import std/[macros, strutils] +from std/sequtils import keepItIf +import chronos +import results +import ./helper/broker_utils, broker_context + +export results, chronos, keepItIf, broker_context + +proc errorFuture[T](message: string): Future[Result[T, string]] {.inline.} = + ## Build a future that is already completed with an error result. + let fut = newFuture[Result[T, string]]("request_broker.errorFuture") + fut.complete(err(Result[T, string], message)) + fut + +type RequestBrokerMode = enum + rbAsync + rbSync + +proc isAsyncReturnTypeValid(returnType, typeIdent: NimNode): bool = + ## Accept Future[Result[TypeIdent, string]] as the contract. + if returnType.kind != nnkBracketExpr or returnType.len != 2: + return false + if returnType[0].kind != nnkIdent or not returnType[0].eqIdent("Future"): + return false + let inner = returnType[1] + if inner.kind != nnkBracketExpr or inner.len != 3: + return false + if inner[0].kind != nnkIdent or not inner[0].eqIdent("Result"): + return false + if inner[1].kind != nnkIdent or not inner[1].eqIdent($typeIdent): + return false + inner[2].kind == nnkIdent and inner[2].eqIdent("string") + +proc isSyncReturnTypeValid(returnType, typeIdent: NimNode): bool = + ## Accept Result[TypeIdent, string] as the contract. 
+ if returnType.kind != nnkBracketExpr or returnType.len != 3: + return false + if returnType[0].kind != nnkIdent or not returnType[0].eqIdent("Result"): + return false + if returnType[1].kind != nnkIdent or not returnType[1].eqIdent($typeIdent): + return false + returnType[2].kind == nnkIdent and returnType[2].eqIdent("string") + +proc isReturnTypeValid(returnType, typeIdent: NimNode, mode: RequestBrokerMode): bool = + case mode + of rbAsync: + isAsyncReturnTypeValid(returnType, typeIdent) + of rbSync: + isSyncReturnTypeValid(returnType, typeIdent) + +proc makeProcType( + returnType: NimNode, params: seq[NimNode], mode: RequestBrokerMode +): NimNode = + var formal = newTree(nnkFormalParams) + formal.add(returnType) + for param in params: + formal.add(param) + case mode + of rbAsync: + let pragmas = newTree(nnkPragma, ident("async")) + newTree(nnkProcTy, formal, pragmas) + of rbSync: + let raisesPragma = newTree( + nnkExprColonExpr, ident("raises"), newTree(nnkBracket, ident("CatchableError")) + ) + let pragmas = newTree(nnkPragma, raisesPragma, ident("gcsafe")) + newTree(nnkProcTy, formal, pragmas) + +proc parseMode(modeNode: NimNode): RequestBrokerMode = + ## Parses the mode selector for the 2-argument macro overload. + ## Supported spellings: `sync` / `async` (case-insensitive). 
+ let raw = ($modeNode).strip().toLowerAscii() + case raw + of "sync": + rbSync + of "async": + rbAsync + else: + error("RequestBroker mode must be `sync` or `async` (default is async)", modeNode) + +proc generateRequestBroker(body: NimNode, mode: RequestBrokerMode): NimNode = + when defined(requestBrokerDebug): + echo body.treeRepr + echo "RequestBroker mode: ", $mode + let parsed = parseSingleTypeDef(body, "RequestBroker", allowRefToNonObject = true) + let typeIdent = parsed.typeIdent + let objectDef = parsed.objectDef + + when defined(requestBrokerDebug): + echo "RequestBroker generating type: ", $typeIdent + + let exportedTypeIdent = postfix(copyNimTree(typeIdent), "*") + let typeDisplayName = sanitizeIdentName(typeIdent) + let typeNameLit = newLit(typeDisplayName) + var zeroArgSig: NimNode = nil + var zeroArgProviderName: NimNode = nil + var argSig: NimNode = nil + var argParams: seq[NimNode] = @[] + var argProviderName: NimNode = nil + + for stmt in body: + case stmt.kind + of nnkProcDef: + let procName = stmt[0] + let procNameIdent = + case procName.kind + of nnkIdent: + procName + of nnkPostfix: + procName[1] + else: + procName + let procNameStr = $procNameIdent + if not procNameStr.startsWith("signature"): + error("Signature proc names must start with `signature`", procName) + let params = stmt.params + if params.len == 0: + error("Signature must declare a return type", stmt) + let returnType = params[0] + if not isReturnTypeValid(returnType, typeIdent, mode): + case mode + of rbAsync: + error( + "Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt + ) + of rbSync: + error("Signature must return Result[`" & $typeIdent & "`, string]", stmt) + let paramCount = params.len - 1 + if paramCount == 0: + if zeroArgSig != nil: + error("Only one zero-argument signature is allowed", stmt) + zeroArgSig = stmt + zeroArgProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderNoArgs") + elif paramCount >= 1: + if argSig != nil: + error("Only 
one argument-based signature is allowed", stmt) + argSig = stmt + argParams = @[] + for idx in 1 ..< params.len: + let paramDef = params[idx] + if paramDef.kind != nnkIdentDefs: + error( + "Signature parameter must be a standard identifier declaration", paramDef + ) + let paramTypeNode = paramDef[paramDef.len - 2] + if paramTypeNode.kind == nnkEmpty: + error("Signature parameter must declare a type", paramDef) + var hasName = false + for i in 0 ..< paramDef.len - 2: + if paramDef[i].kind != nnkEmpty: + hasName = true + if not hasName: + error("Signature parameter must declare a name", paramDef) + argParams.add(copyNimTree(paramDef)) + argProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderWithArgs") + of nnkTypeSection, nnkEmpty: + discard + else: + error("Unsupported statement inside RequestBroker definition", stmt) + + if zeroArgSig.isNil() and argSig.isNil(): + zeroArgSig = newEmptyNode() + zeroArgProviderName = ident(sanitizeIdentName(typeIdent) & "ProviderNoArgs") + + var typeSection = newTree(nnkTypeSection) + typeSection.add(newTree(nnkTypeDef, exportedTypeIdent, newEmptyNode(), objectDef)) + + let returnType = + case mode + of rbAsync: + quote: + Future[Result[`typeIdent`, string]] + of rbSync: + quote: + Result[`typeIdent`, string] + + if not zeroArgSig.isNil(): + let procType = makeProcType(returnType, @[], mode) + typeSection.add(newTree(nnkTypeDef, zeroArgProviderName, newEmptyNode(), procType)) + if not argSig.isNil(): + let procType = makeProcType(returnType, cloneParams(argParams), mode) + typeSection.add(newTree(nnkTypeDef, argProviderName, newEmptyNode(), procType)) + + var brokerRecList = newTree(nnkRecList) + if not zeroArgSig.isNil(): + let zeroArgProvidersFieldName = ident("providersNoArgs") + let zeroArgProvidersTupleTy = newTree( + nnkTupleTy, + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()), + newTree(nnkIdentDefs, ident("handler"), zeroArgProviderName, newEmptyNode()), + ) + let 
zeroArgProvidersSeqTy = + newTree(nnkBracketExpr, ident("seq"), zeroArgProvidersTupleTy) + brokerRecList.add( + newTree( + nnkIdentDefs, zeroArgProvidersFieldName, zeroArgProvidersSeqTy, newEmptyNode() + ) + ) + if not argSig.isNil(): + let argProvidersFieldName = ident("providersWithArgs") + let argProvidersTupleTy = newTree( + nnkTupleTy, + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()), + newTree(nnkIdentDefs, ident("handler"), argProviderName, newEmptyNode()), + ) + let argProvidersSeqTy = newTree(nnkBracketExpr, ident("seq"), argProvidersTupleTy) + brokerRecList.add( + newTree(nnkIdentDefs, argProvidersFieldName, argProvidersSeqTy, newEmptyNode()) + ) + let brokerTypeIdent = ident(sanitizeIdentName(typeIdent) & "Broker") + let brokerTypeDef = newTree( + nnkTypeDef, + brokerTypeIdent, + newEmptyNode(), + newTree(nnkObjectTy, newEmptyNode(), newEmptyNode(), brokerRecList), + ) + typeSection.add(brokerTypeDef) + result = newStmtList() + result.add(typeSection) + + let globalVarIdent = ident("g" & sanitizeIdentName(typeIdent) & "Broker") + let accessProcIdent = ident("access" & sanitizeIdentName(typeIdent) & "Broker") + + var brokerNewBody = newStmtList() + if not zeroArgSig.isNil(): + brokerNewBody.add( + quote do: + result.providersNoArgs = + @[(brokerCtx: DefaultBrokerContext, handler: default(`zeroArgProviderName`))] + ) + if not argSig.isNil(): + brokerNewBody.add( + quote do: + result.providersWithArgs = + @[(brokerCtx: DefaultBrokerContext, handler: default(`argProviderName`))] + ) + + var brokerInitChecks = newStmtList() + if not zeroArgSig.isNil(): + brokerInitChecks.add( + quote do: + if `globalVarIdent`.providersNoArgs.len == 0: + `globalVarIdent` = `brokerTypeIdent`.new() + ) + if not argSig.isNil(): + brokerInitChecks.add( + quote do: + if `globalVarIdent`.providersWithArgs.len == 0: + `globalVarIdent` = `brokerTypeIdent`.new() + ) + + result.add( + quote do: + var `globalVarIdent` {.threadvar.}: `brokerTypeIdent` + 
+ proc new(_: type `brokerTypeIdent`): `brokerTypeIdent` = + result = `brokerTypeIdent`() + `brokerNewBody` + + proc `accessProcIdent`(): var `brokerTypeIdent` = + `brokerInitChecks` + `globalVarIdent` + + ) + + var clearBodyKeyed = newStmtList() + let brokerCtxParamIdent = ident("brokerCtx") + if not zeroArgSig.isNil(): + let zeroArgProvidersFieldName = ident("providersNoArgs") + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], handler: `zeroArgProviderName` + ): Result[void, string] = + if not `accessProcIdent`().`zeroArgProvidersFieldName`[0].handler.isNil(): + return err("Zero-arg provider already set") + `accessProcIdent`().`zeroArgProvidersFieldName`[0].handler = handler + return ok() + + ) + + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handler: `zeroArgProviderName`, + ): Result[void, string] = + if brokerCtx == DefaultBrokerContext: + return setProvider(`typeIdent`, handler) + + for entry in `accessProcIdent`().`zeroArgProvidersFieldName`: + if entry.brokerCtx == brokerCtx: + return err( + "RequestBroker(" & `typeNameLit` & + "): provider already set for broker context " & $brokerCtx + ) + + `accessProcIdent`().`zeroArgProvidersFieldName`.add( + (brokerCtx: brokerCtx, handler: handler) + ) + return ok() + + ) + clearBodyKeyed.add( + quote do: + if `brokerCtxParamIdent` == DefaultBrokerContext: + `accessProcIdent`().`zeroArgProvidersFieldName`[0].handler = + default(`zeroArgProviderName`) + else: + `accessProcIdent`().`zeroArgProvidersFieldName`.keepItIf( + it.brokerCtx != `brokerCtxParamIdent` + ) + ) + case mode + of rbAsync: + result.add( + quote do: + proc request*( + _: typedesc[`typeIdent`] + ): Future[Result[`typeIdent`, string]] {.async: (raises: []).} = + return await request(`typeIdent`, DefaultBrokerContext) + + ) + + result.add( + quote do: + proc request*( + _: typedesc[`typeIdent`], brokerCtx: BrokerContext + ): Future[Result[`typeIdent`, string]] {.async: 
(raises: []).} = + var provider: `zeroArgProviderName` + if brokerCtx == DefaultBrokerContext: + provider = `accessProcIdent`().`zeroArgProvidersFieldName`[0].handler + else: + for entry in `accessProcIdent`().`zeroArgProvidersFieldName`: + if entry.brokerCtx == brokerCtx: + provider = entry.handler + break + + if provider.isNil(): + if brokerCtx == DefaultBrokerContext: + return err( + "RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered" + ) + return err( + "RequestBroker(" & `typeNameLit` & + "): no provider registered for broker context " & $brokerCtx + ) + + let catchedRes = catch: + await provider() + + if catchedRes.isErr(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider threw exception: " & + catchedRes.error.msg + ) + + let providerRes = catchedRes.get() + if providerRes.isOk(): + let resultValue = providerRes.get() + when compiles(resultValue.isNil()): + if resultValue.isNil(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider returned nil result" + ) + return providerRes + + ) + of rbSync: + result.add( + quote do: + proc request*( + _: typedesc[`typeIdent`] + ): Result[`typeIdent`, string] {.gcsafe, raises: [].} = + return request(`typeIdent`, DefaultBrokerContext) + + ) + + result.add( + quote do: + proc request*( + _: typedesc[`typeIdent`], brokerCtx: BrokerContext + ): Result[`typeIdent`, string] {.gcsafe, raises: [].} = + var provider: `zeroArgProviderName` + if brokerCtx == DefaultBrokerContext: + provider = `accessProcIdent`().`zeroArgProvidersFieldName`[0].handler + else: + for entry in `accessProcIdent`().`zeroArgProvidersFieldName`: + if entry.brokerCtx == brokerCtx: + provider = entry.handler + break + + if provider.isNil(): + if brokerCtx == DefaultBrokerContext: + return err( + "RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered" + ) + return err( + "RequestBroker(" & `typeNameLit` & + "): no provider registered for broker context " & $brokerCtx + ) + + var providerRes: 
Result[`typeIdent`, string] + try: + providerRes = provider() + except CatchableError as e: + return err( + "RequestBroker(" & `typeNameLit` & "): provider threw exception: " & + e.msg + ) + + if providerRes.isOk(): + let resultValue = providerRes.get() + when compiles(resultValue.isNil()): + if resultValue.isNil(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider returned nil result" + ) + return providerRes + + ) + if not argSig.isNil(): + let argProvidersFieldName = ident("providersWithArgs") + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], handler: `argProviderName` + ): Result[void, string] = + if not `accessProcIdent`().`argProvidersFieldName`[0].handler.isNil(): + return err("Provider already set") + `accessProcIdent`().`argProvidersFieldName`[0].handler = handler + return ok() + + ) + + result.add( + quote do: + proc setProvider*( + _: typedesc[`typeIdent`], + brokerCtx: BrokerContext, + handler: `argProviderName`, + ): Result[void, string] = + if brokerCtx == DefaultBrokerContext: + return setProvider(`typeIdent`, handler) + + for entry in `accessProcIdent`().`argProvidersFieldName`: + if entry.brokerCtx == brokerCtx: + return err( + "RequestBroker(" & `typeNameLit` & + "): provider already set for broker context " & $brokerCtx + ) + + `accessProcIdent`().`argProvidersFieldName`.add( + (brokerCtx: brokerCtx, handler: handler) + ) + return ok() + + ) + clearBodyKeyed.add( + quote do: + if `brokerCtxParamIdent` == DefaultBrokerContext: + `accessProcIdent`().`argProvidersFieldName`[0].handler = + default(`argProviderName`) + else: + `accessProcIdent`().`argProvidersFieldName`.keepItIf( + it.brokerCtx != `brokerCtxParamIdent` + ) + ) + let requestParamDefs = cloneParams(argParams) + let argNameIdents = collectParamNames(requestParamDefs) + var formalParams = newTree(nnkFormalParams) + formalParams.add(copyNimTree(returnType)) + formalParams.add( + newTree( + nnkIdentDefs, + ident("_"), + newTree(nnkBracketExpr, 
ident("typedesc"), copyNimTree(typeIdent)), + newEmptyNode(), + ) + ) + for paramDef in requestParamDefs: + formalParams.add(paramDef) + + let requestPragmas = + case mode + of rbAsync: + quote: + {.async: (raises: []).} + of rbSync: + quote: + {.gcsafe, raises: [].} + + var forwardCall = newCall(ident("request")) + forwardCall.add(copyNimTree(typeIdent)) + forwardCall.add(ident("DefaultBrokerContext")) + for argName in argNameIdents: + forwardCall.add(argName) + + var requestBody = newStmtList() + case mode + of rbAsync: + requestBody.add( + quote do: + return await `forwardCall` + ) + of rbSync: + requestBody.add( + quote do: + return `forwardCall` + ) + + result.add( + newTree( + nnkProcDef, + postfix(ident("request"), "*"), + newEmptyNode(), + newEmptyNode(), + formalParams, + requestPragmas, + newEmptyNode(), + requestBody, + ) + ) + + # Keyed request variant for the argument-based signature. + let requestParamDefsKeyed = cloneParams(argParams) + let argNameIdentsKeyed = collectParamNames(requestParamDefsKeyed) + let providerSymKeyed = genSym(nskVar, "provider") + var formalParamsKeyed = newTree(nnkFormalParams) + formalParamsKeyed.add(copyNimTree(returnType)) + formalParamsKeyed.add( + newTree( + nnkIdentDefs, + ident("_"), + newTree(nnkBracketExpr, ident("typedesc"), copyNimTree(typeIdent)), + newEmptyNode(), + ) + ) + formalParamsKeyed.add( + newTree(nnkIdentDefs, ident("brokerCtx"), ident("BrokerContext"), newEmptyNode()) + ) + for paramDef in requestParamDefsKeyed: + formalParamsKeyed.add(paramDef) + + let requestPragmasKeyed = requestPragmas + var providerCallKeyed = newCall(providerSymKeyed) + for argName in argNameIdentsKeyed: + providerCallKeyed.add(argName) + + var requestBodyKeyed = newStmtList() + requestBodyKeyed.add( + quote do: + var `providerSymKeyed`: `argProviderName` + if brokerCtx == DefaultBrokerContext: + `providerSymKeyed` = `accessProcIdent`().`argProvidersFieldName`[0].handler + else: + for entry in 
`accessProcIdent`().`argProvidersFieldName`: + if entry.brokerCtx == brokerCtx: + `providerSymKeyed` = entry.handler + break + ) + requestBodyKeyed.add( + quote do: + if `providerSymKeyed`.isNil(): + if brokerCtx == DefaultBrokerContext: + return err( + "RequestBroker(" & `typeNameLit` & + "): no provider registered for input signature" + ) + return err( + "RequestBroker(" & `typeNameLit` & + "): no provider registered for broker context " & $brokerCtx + ) + ) + + case mode + of rbAsync: + requestBodyKeyed.add( + quote do: + let catchedRes = catch: + await `providerCallKeyed` + if catchedRes.isErr(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider threw exception: " & + catchedRes.error.msg + ) + + let providerRes = catchedRes.get() + if providerRes.isOk(): + let resultValue = providerRes.get() + when compiles(resultValue.isNil()): + if resultValue.isNil(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider returned nil result" + ) + return providerRes + ) + of rbSync: + requestBodyKeyed.add( + quote do: + var providerRes: Result[`typeIdent`, string] + try: + providerRes = `providerCallKeyed` + except CatchableError as e: + return err( + "RequestBroker(" & `typeNameLit` & "): provider threw exception: " & e.msg + ) + + if providerRes.isOk(): + let resultValue = providerRes.get() + when compiles(resultValue.isNil()): + if resultValue.isNil(): + return err( + "RequestBroker(" & `typeNameLit` & "): provider returned nil result" + ) + return providerRes + ) + + result.add( + newTree( + nnkProcDef, + postfix(ident("request"), "*"), + newEmptyNode(), + newEmptyNode(), + formalParamsKeyed, + requestPragmasKeyed, + newEmptyNode(), + requestBodyKeyed, + ) + ) + + block: + var formalParamsClearKeyed = newTree(nnkFormalParams) + formalParamsClearKeyed.add(newEmptyNode()) + formalParamsClearKeyed.add( + newTree( + nnkIdentDefs, + ident("_"), + newTree(nnkBracketExpr, ident("typedesc"), copyNimTree(typeIdent)), + newEmptyNode(), + ) + ) + 
formalParamsClearKeyed.add( + newTree(nnkIdentDefs, brokerCtxParamIdent, ident("BrokerContext"), newEmptyNode()) + ) + + result.add( + newTree( + nnkProcDef, + postfix(ident("clearProvider"), "*"), + newEmptyNode(), + newEmptyNode(), + formalParamsClearKeyed, + newEmptyNode(), + newEmptyNode(), + clearBodyKeyed, + ) + ) + + result.add( + quote do: + proc clearProvider*(_: typedesc[`typeIdent`]) = + clearProvider(`typeIdent`, DefaultBrokerContext) + + ) + + when defined(requestBrokerDebug): + echo result.repr + + return result + +macro RequestBroker*(body: untyped): untyped = + ## Default (async) mode. + generateRequestBroker(body, rbAsync) + +macro RequestBroker*(mode: untyped, body: untyped): untyped = + ## Explicit mode selector. + ## Example: + ## RequestBroker(sync): + ## type Foo = object + ## proc signature*(): Result[Foo, string] + generateRequestBroker(body, parseMode(mode)) diff --git a/waku/common/callbacks.nim b/waku/common/callbacks.nim index 9b8590152..83209ef24 100644 --- a/waku/common/callbacks.nim +++ b/waku/common/callbacks.nim @@ -1,5 +1,7 @@ -import ../waku_enr/capabilities +import waku/waku_enr/capabilities, waku/waku_rendezvous/waku_peer_record type GetShards* = proc(): seq[uint16] {.closure, gcsafe, raises: [].} type GetCapabilities* = proc(): seq[Capabilities] {.closure, gcsafe, raises: [].} + +type GetWakuPeerRecord* = proc(): WakuPeerRecord {.closure, gcsafe, raises: [].} diff --git a/waku/common/rate_limit/per_peer_limiter.nim b/waku/common/rate_limit/per_peer_limiter.nim index 5cb96a2d1..16b6bf065 100644 --- a/waku/common/rate_limit/per_peer_limiter.nim +++ b/waku/common/rate_limit/per_peer_limiter.nim @@ -20,7 +20,7 @@ proc mgetOrPut( perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId ): var Option[TokenBucket] = return perPeerRateLimiter.peerBucket.mgetOrPut( - peerId, newTokenBucket(perPeerRateLimiter.setting, ReplenishMode.Compensating) + peerId, newTokenBucket(perPeerRateLimiter.setting, ReplenishMode.Continuous) ) template 
checkUsageLimit*( diff --git a/waku/common/rate_limit/request_limiter.nim b/waku/common/rate_limit/request_limiter.nim index 0ede20be4..bc318e151 100644 --- a/waku/common/rate_limit/request_limiter.nim +++ b/waku/common/rate_limit/request_limiter.nim @@ -39,38 +39,82 @@ const SECONDS_RATIO = 3 const MINUTES_RATIO = 2 type RequestRateLimiter* = ref object of RootObj - tokenBucket: Option[TokenBucket] + tokenBucket: TokenBucket setting*: Option[RateLimitSetting] + mainBucketSetting: RateLimitSetting + ratio: int peerBucketSetting*: RateLimitSetting peerUsage: TimedMap[PeerId, TokenBucket] + checkUsageImpl: proc( + t: var RequestRateLimiter, proto: string, conn: Connection, now: Moment + ): bool {.gcsafe, raises: [].} + +proc newMainTokenBucket( + setting: RateLimitSetting, ratio: int, startTime: Moment +): TokenBucket = + ## RequestRateLimiter's global bucket should keep the *rate* of the configured + ## setting while allowing a larger burst window. We achieve this by scaling + ## both capacity and fillDuration by the same ratio. + ## + ## This matches previous behavior where unused tokens could effectively + ## accumulate across multiple periods. + let burstCapacity = setting.volume * ratio + var bucket = TokenBucket.new( + capacity = burstCapacity, + fillDuration = setting.period * ratio, + startTime = startTime, + mode = Continuous, + ) + + # Start with the configured volume (not the burst capacity) so that the + # initial burst behavior matches the raw setting, while still allowing + # accumulation up to `burstCapacity` over time. 
+ let excess = burstCapacity - setting.volume + if excess > 0: + discard bucket.tryConsume(excess, startTime) + + return bucket proc mgetOrPut( - requestRateLimiter: var RequestRateLimiter, peerId: PeerId + requestRateLimiter: var RequestRateLimiter, peerId: PeerId, now: Moment ): var TokenBucket = - let bucketForNew = newTokenBucket(some(requestRateLimiter.peerBucketSetting)).valueOr: + let bucketForNew = newTokenBucket( + some(requestRateLimiter.peerBucketSetting), Discrete, now + ).valueOr: raiseAssert "This branch is not allowed to be reached as it will not be called if the setting is None." return requestRateLimiter.peerUsage.mgetOrPut(peerId, bucketForNew) -proc checkUsage*( - t: var RequestRateLimiter, proto: string, conn: Connection, now = Moment.now() -): bool {.raises: [].} = - if t.tokenBucket.isNone(): - return true +proc checkUsageUnlimited( + t: var RequestRateLimiter, proto: string, conn: Connection, now: Moment +): bool {.gcsafe, raises: [].} = + true - let peerBucket = t.mgetOrPut(conn.peerId) +proc checkUsageLimited( + t: var RequestRateLimiter, proto: string, conn: Connection, now: Moment +): bool {.gcsafe, raises: [].} = + # Lazy-init the main bucket using the first observed request time. This makes + # refill behavior deterministic under tests where `now` is controlled. + if isNil(t.tokenBucket): + t.tokenBucket = newMainTokenBucket(t.mainBucketSetting, t.ratio, now) + + let peerBucket = t.mgetOrPut(conn.peerId, now) ## check requesting peer's usage is not over the calculated ratio and let that peer go which not requested much/or this time... 
if not peerBucket.tryConsume(1, now): trace "peer usage limit reached", peer = conn.peerId return false # Ok if the peer can consume, check the overall budget we have left - let tokenBucket = t.tokenBucket.get() - if not tokenBucket.tryConsume(1, now): + if not t.tokenBucket.tryConsume(1, now): return false return true +proc checkUsage*( + t: var RequestRateLimiter, proto: string, conn: Connection, now = Moment.now() +): bool {.raises: [].} = + t.checkUsageImpl(t, proto, conn, now) + template checkUsageLimit*( t: var RequestRateLimiter, proto: string, @@ -135,9 +179,19 @@ func calcPeerTokenSetting( proc newRequestRateLimiter*(setting: Option[RateLimitSetting]): RequestRateLimiter = let ratio = calcPeriodRatio(setting) + let isLimited = setting.isSome() and not setting.get().isUnlimited() + let mainBucketSetting = + if isLimited: + setting.get() + else: + (0, 0.minutes) + return RequestRateLimiter( - tokenBucket: newTokenBucket(setting), + tokenBucket: nil, setting: setting, + mainBucketSetting: mainBucketSetting, + ratio: ratio, peerBucketSetting: calcPeerTokenSetting(setting, ratio), peerUsage: init(TimedMap[PeerId, TokenBucket], calcCacheTimeout(setting, ratio)), + checkUsageImpl: (if isLimited: checkUsageLimited else: checkUsageUnlimited), ) diff --git a/waku/common/rate_limit/single_token_limiter.nim b/waku/common/rate_limit/single_token_limiter.nim index 50fb2d64c..fc4b0acd5 100644 --- a/waku/common/rate_limit/single_token_limiter.nim +++ b/waku/common/rate_limit/single_token_limiter.nim @@ -6,12 +6,15 @@ import std/[options], chronos/timer, libp2p/stream/connection, libp2p/utility import std/times except TimeInterval, Duration -import ./[token_bucket, setting, service_metrics] +import chronos/ratelimit as token_bucket + +import ./[setting, service_metrics] export token_bucket, setting, service_metrics proc newTokenBucket*( setting: Option[RateLimitSetting], - replenishMode: ReplenishMode = ReplenishMode.Compensating, + replenishMode: static[ReplenishMode] = 
ReplenishMode.Continuous, + startTime: Moment = Moment.now(), ): Option[TokenBucket] = if setting.isNone(): return none[TokenBucket]() @@ -19,7 +22,14 @@ proc newTokenBucket*( if setting.get().isUnlimited(): return none[TokenBucket]() - return some(TokenBucket.new(setting.get().volume, setting.get().period)) + return some( + TokenBucket.new( + capacity = setting.get().volume, + fillDuration = setting.get().period, + startTime = startTime, + mode = replenishMode, + ) + ) proc checkUsage( t: var TokenBucket, proto: string, now = Moment.now() diff --git a/waku/common/rate_limit/token_bucket.nim b/waku/common/rate_limit/token_bucket.nim deleted file mode 100644 index 799817ebd..000000000 --- a/waku/common/rate_limit/token_bucket.nim +++ /dev/null @@ -1,182 +0,0 @@ -{.push raises: [].} - -import chronos, std/math, std/options - -const BUDGET_COMPENSATION_LIMIT_PERCENT = 0.25 - -## This is an extract from chronos/rate_limit.nim due to the found bug in the original implementation. -## Unfortunately that bug cannot be solved without harm the original features of TokenBucket class. -## So, this current shortcut is used to enable move ahead with nwaku rate limiter implementation. -## ref: https://github.com/status-im/nim-chronos/issues/500 -## -## This version of TokenBucket is different from the original one in chronos/rate_limit.nim in many ways: -## - It has a new mode called `Compensating` which is the default mode. -## Compensation is calculated as the not used bucket capacity in the last measured period(s) in average. -## or up until maximum the allowed compansation treshold (Currently it is const 25%). -## Also compensation takes care of the proper time period calculation to avoid non-usage periods that can lead to -## overcompensation. -## - Strict mode is also available which will only replenish when time period is over but also will fill -## the bucket to the max capacity. 
- -type - ReplenishMode* = enum - Strict - Compensating - - TokenBucket* = ref object - budget: int ## Current number of tokens in the bucket - budgetCap: int ## Bucket capacity - lastTimeFull: Moment - ## This timer measures the proper periodizaiton of the bucket refilling - fillDuration: Duration ## Refill period - case replenishMode*: ReplenishMode - of Strict: - ## In strict mode, the bucket is refilled only till the budgetCap - discard - of Compensating: - ## This is the default mode. - maxCompensation: float - -func periodDistance(bucket: TokenBucket, currentTime: Moment): float = - ## notice fillDuration cannot be zero by design - ## period distance is a float number representing the calculated period time - ## since the last time bucket was refilled. - return - nanoseconds(currentTime - bucket.lastTimeFull).float / - nanoseconds(bucket.fillDuration).float - -func getUsageAverageSince(bucket: TokenBucket, distance: float): float = - if distance == 0.float: - ## in case there is zero time difference than the usage percentage is 100% - return 1.0 - - ## budgetCap can never be zero - ## usage average is calculated as a percentage of total capacity available over - ## the measured period - return bucket.budget.float / bucket.budgetCap.float / distance - -proc calcCompensation(bucket: TokenBucket, averageUsage: float): int = - # if we already fully used or even overused the tokens, there is no place for compensation - if averageUsage >= 1.0: - return 0 - - ## compensation is the not used bucket capacity in the last measured period(s) in average. - ## or maximum the allowed compansation treshold - let compensationPercent = - min((1.0 - averageUsage) * bucket.budgetCap.float, bucket.maxCompensation) - return trunc(compensationPercent).int - -func periodElapsed(bucket: TokenBucket, currentTime: Moment): bool = - return currentTime - bucket.lastTimeFull >= bucket.fillDuration - -## Update will take place if bucket is empty and trying to consume tokens. 
-## It checks if the bucket can be replenished as refill duration is passed or not. -## - strict mode: -proc updateStrict(bucket: TokenBucket, currentTime: Moment) = - if bucket.fillDuration == default(Duration): - bucket.budget = min(bucket.budgetCap, bucket.budget) - return - - if not periodElapsed(bucket, currentTime): - return - - bucket.budget = bucket.budgetCap - bucket.lastTimeFull = currentTime - -## - compensating - ballancing load: -## - between updates we calculate average load (current bucket capacity / number of periods till last update) -## - gives the percentage load used recently -## - with this we can replenish bucket up to 100% + calculated leftover from previous period (caped with max treshold) -proc updateWithCompensation(bucket: TokenBucket, currentTime: Moment) = - if bucket.fillDuration == default(Duration): - bucket.budget = min(bucket.budgetCap, bucket.budget) - return - - # do not replenish within the same period - if not periodElapsed(bucket, currentTime): - return - - let distance = bucket.periodDistance(currentTime) - let recentAvgUsage = bucket.getUsageAverageSince(distance) - let compensation = bucket.calcCompensation(recentAvgUsage) - - bucket.budget = bucket.budgetCap + compensation - bucket.lastTimeFull = currentTime - -proc update(bucket: TokenBucket, currentTime: Moment) = - if bucket.replenishMode == ReplenishMode.Compensating: - updateWithCompensation(bucket, currentTime) - else: - updateStrict(bucket, currentTime) - -proc tryConsume*(bucket: TokenBucket, tokens: int, now = Moment.now()): bool = - ## If `tokens` are available, consume them, - ## Otherwhise, return false. 
- - if bucket.budget >= bucket.budgetCap: - bucket.lastTimeFull = now - - if bucket.budget >= tokens: - bucket.budget -= tokens - return true - - bucket.update(now) - - if bucket.budget >= tokens: - bucket.budget -= tokens - return true - else: - return false - -proc replenish*(bucket: TokenBucket, tokens: int, now = Moment.now()) = - ## Add `tokens` to the budget (capped to the bucket capacity) - bucket.budget += tokens - bucket.update(now) - -proc new*( - T: type[TokenBucket], - budgetCap: int, - fillDuration: Duration = 1.seconds, - mode: ReplenishMode = ReplenishMode.Compensating, -): T = - assert not isZero(fillDuration) - assert budgetCap != 0 - - ## Create different mode TokenBucket - case mode - of ReplenishMode.Strict: - return T( - budget: budgetCap, - budgetCap: budgetCap, - fillDuration: fillDuration, - lastTimeFull: Moment.now(), - replenishMode: mode, - ) - of ReplenishMode.Compensating: - T( - budget: budgetCap, - budgetCap: budgetCap, - fillDuration: fillDuration, - lastTimeFull: Moment.now(), - replenishMode: mode, - maxCompensation: budgetCap.float * BUDGET_COMPENSATION_LIMIT_PERCENT, - ) - -proc newStrict*(T: type[TokenBucket], capacity: int, period: Duration): TokenBucket = - T.new(capacity, period, ReplenishMode.Strict) - -proc newCompensating*( - T: type[TokenBucket], capacity: int, period: Duration -): TokenBucket = - T.new(capacity, period, ReplenishMode.Compensating) - -func `$`*(b: TokenBucket): string {.inline.} = - if isNil(b): - return "nil" - return $b.budgetCap & "/" & $b.fillDuration - -func `$`*(ob: Option[TokenBucket]): string {.inline.} = - if ob.isNone(): - return "no-limit" - - return $ob.get() diff --git a/waku/common/waku_protocol.nim b/waku/common/waku_protocol.nim new file mode 100644 index 000000000..5063f4c98 --- /dev/null +++ b/waku/common/waku_protocol.nim @@ -0,0 +1,24 @@ +{.push raises: [].} + +type WakuProtocol* {.pure.} = enum + RelayProtocol = "Relay" + RlnRelayProtocol = "Rln Relay" + StoreProtocol = "Store" + 
LegacyStoreProtocol = "Legacy Store" + FilterProtocol = "Filter" + LightpushProtocol = "Lightpush" + LegacyLightpushProtocol = "Legacy Lightpush" + PeerExchangeProtocol = "Peer Exchange" + RendezvousProtocol = "Rendezvous" + MixProtocol = "Mix" + StoreClientProtocol = "Store Client" + LegacyStoreClientProtocol = "Legacy Store Client" + FilterClientProtocol = "Filter Client" + LightpushClientProtocol = "Lightpush Client" + LegacyLightpushClientProtocol = "Legacy Lightpush Client" + +const + RelayProtocols* = {RelayProtocol} + StoreClientProtocols* = {StoreClientProtocol, LegacyStoreClientProtocol} + LightpushClientProtocols* = {LightpushClientProtocol, LegacyLightpushClientProtocol} + FilterClientProtocols* = {FilterClientProtocol} diff --git a/waku/discovery/waku_kademlia.nim b/waku/discovery/waku_kademlia.nim new file mode 100644 index 000000000..94b63a321 --- /dev/null +++ b/waku/discovery/waku_kademlia.nim @@ -0,0 +1,280 @@ +{.push raises: [].} + +import std/[options, sequtils] +import + chronos, + chronicles, + results, + stew/byteutils, + libp2p/[peerid, multiaddress, switch], + libp2p/extended_peer_record, + libp2p/crypto/curve25519, + libp2p/protocols/[kademlia, kad_disco], + libp2p/protocols/kademlia_discovery/types as kad_types, + libp2p/protocols/mix/mix_protocol + +import waku/waku_core, waku/node/peer_manager + +logScope: + topics = "waku extended kademlia discovery" + +const + DefaultExtendedKademliaDiscoveryInterval* = chronos.seconds(5) + ExtendedKademliaDiscoveryStartupDelay* = chronos.seconds(5) + +type + MixNodePoolSizeProvider* = proc(): int {.gcsafe, raises: [].} + NodeStartedProvider* = proc(): bool {.gcsafe, raises: [].} + + ExtendedKademliaDiscoveryParams* = object + bootstrapNodes*: seq[(PeerId, seq[MultiAddress])] + mixPubKey*: Option[Curve25519Key] + advertiseMix*: bool = false + + WakuKademlia* = ref object + protocol*: KademliaDiscovery + peerManager: PeerManager + discoveryLoop: Future[void] + running*: bool + getMixNodePoolSize: 
MixNodePoolSizeProvider + isNodeStarted: NodeStartedProvider + +proc new*( + T: type WakuKademlia, + switch: Switch, + params: ExtendedKademliaDiscoveryParams, + peerManager: PeerManager, + getMixNodePoolSize: MixNodePoolSizeProvider = nil, + isNodeStarted: NodeStartedProvider = nil, +): Result[T, string] = + if params.bootstrapNodes.len == 0: + info "creating kademlia discovery as seed node (no bootstrap nodes)" + + let kademlia = KademliaDiscovery.new( + switch, + bootstrapNodes = params.bootstrapNodes, + config = KadDHTConfig.new( + validator = kad_types.ExtEntryValidator(), selector = kad_types.ExtEntrySelector() + ), + codec = ExtendedKademliaDiscoveryCodec, + ) + + try: + switch.mount(kademlia) + except CatchableError: + return err("failed to mount kademlia discovery: " & getCurrentExceptionMsg()) + + # Register services BEFORE starting kademlia so they are included in the + # initial self-signed peer record published to the DHT + if params.advertiseMix: + if params.mixPubKey.isSome(): + let alreadyAdvertising = kademlia.startAdvertising( + ServiceInfo(id: MixProtocolID, data: @(params.mixPubKey.get())) + ) + if alreadyAdvertising: + warn "mix service was already being advertised" + debug "extended kademlia advertising mix service", + keyHex = byteutils.toHex(params.mixPubKey.get()), + bootstrapNodes = params.bootstrapNodes.len + else: + warn "mix advertising enabled but no key provided" + + info "kademlia discovery created", + bootstrapNodes = params.bootstrapNodes.len, advertiseMix = params.advertiseMix + + return ok( + WakuKademlia( + protocol: kademlia, + peerManager: peerManager, + running: false, + getMixNodePoolSize: getMixNodePoolSize, + isNodeStarted: isNodeStarted, + ) + ) + +proc extractMixPubKey(service: ServiceInfo): Option[Curve25519Key] = + if service.id != MixProtocolID: + trace "service is not mix protocol", + serviceId = service.id, mixProtocolId = MixProtocolID + return none(Curve25519Key) + + if service.data.len != Curve25519KeySize: + 
warn "invalid mix pub key length from kademlia record", + expected = Curve25519KeySize, + actual = service.data.len, + dataHex = byteutils.toHex(service.data) + return none(Curve25519Key) + + debug "found mix protocol service", + dataLen = service.data.len, expectedLen = Curve25519KeySize + + let key = intoCurve25519Key(service.data) + debug "successfully extracted mix pub key", keyHex = byteutils.toHex(key) + return some(key) + +proc remotePeerInfoFrom(record: ExtendedPeerRecord): Option[RemotePeerInfo] = + debug "processing kademlia record", + peerId = record.peerId, + numAddresses = record.addresses.len, + numServices = record.services.len, + serviceIds = record.services.mapIt(it.id) + + if record.addresses.len == 0: + trace "kademlia record missing addresses", peerId = record.peerId + return none(RemotePeerInfo) + + let addrs = record.addresses.mapIt(it.address) + if addrs.len == 0: + trace "kademlia record produced no dialable addresses", peerId = record.peerId + return none(RemotePeerInfo) + + let protocols = record.services.mapIt(it.id) + + var mixPubKey = none(Curve25519Key) + for service in record.services: + debug "checking service", + peerId = record.peerId, serviceId = service.id, dataLen = service.data.len + mixPubKey = extractMixPubKey(service) + if mixPubKey.isSome(): + debug "extracted mix public key from service", peerId = record.peerId + break + + if record.services.len > 0 and mixPubKey.isNone(): + debug "record has services but no valid mix key", + peerId = record.peerId, services = record.services.mapIt(it.id) + return none(RemotePeerInfo) + return some( + RemotePeerInfo.init( + record.peerId, + addrs = addrs, + protocols = protocols, + origin = PeerOrigin.Kademlia, + mixPubKey = mixPubKey, + ) + ) + +proc lookupMixPeers*( + wk: WakuKademlia +): Future[Result[int, string]] {.async: (raises: []).} = + ## Lookup mix peers via kademlia and add them to the peer store. + ## Returns the number of mix peers found and added. 
+  if wk.protocol.isNil():
+    return err("cannot lookup mix peers: kademlia not mounted")
+
+  let mixService = ServiceInfo(id: MixProtocolID, data: @[])
+  var records: seq[ExtendedPeerRecord]
+  try:
+    records = await wk.protocol.lookup(mixService)
+  except CatchableError:
+    return err("mix peer lookup failed: " & getCurrentExceptionMsg())
+
+  debug "mix peer lookup returned records", numRecords = records.len
+
+  var added = 0
+  for record in records:
+    let peerOpt = remotePeerInfoFrom(record)
+    if peerOpt.isNone():
+      continue
+
+    let peerInfo = peerOpt.get()
+    if peerInfo.mixPubKey.isNone():
+      continue
+
+    wk.peerManager.addPeer(peerInfo, PeerOrigin.Kademlia)
+    info "mix peer added via kademlia lookup",
+      peerId = $peerInfo.peerId, mixPubKey = byteutils.toHex(peerInfo.mixPubKey.get())
+    added.inc()
+
+  info "mix peer lookup complete", found = added
+  return ok(added)
+
+proc runDiscoveryLoop(
+    wk: WakuKademlia, interval: Duration, minMixPeers: int
+) {.async: (raises: []).} =
+  info "extended kademlia discovery loop started", interval = interval
+
+  try:
+    while true:
+      # Wait for node to be started
+      if not wk.isNodeStarted.isNil() and not wk.isNodeStarted():
+        await sleepAsync(ExtendedKademliaDiscoveryStartupDelay)
+        continue
+
+      var records: seq[ExtendedPeerRecord]
+      try:
+        records = await wk.protocol.randomRecords()
+      except CatchableError as e:
+        warn "extended kademlia discovery failed", error = e.msg
+        await sleepAsync(interval)
+        continue
+
+      debug "received random records from kademlia", numRecords = records.len
+
+      var added = 0
+      for record in records:
+        let peerOpt = remotePeerInfoFrom(record)
+        if peerOpt.isNone():
+          continue
+
+        let peerInfo = peerOpt.get()
+        wk.peerManager.addPeer(peerInfo, PeerOrigin.Kademlia)
+        debug "peer added via extended kademlia discovery",
+          peerId = $peerInfo.peerId,
+          addresses = peerInfo.addrs.mapIt($it),
+          protocols = peerInfo.protocols,
+          hasMixPubKey = peerInfo.mixPubKey.isSome()
+        added.inc()
+
+      if added > 0:
+        info "added peers from extended kademlia discovery", count = added
+
+      # Targeted mix peer lookup when pool is low
+      if minMixPeers > 0 and not wk.getMixNodePoolSize.isNil() and
+          wk.getMixNodePoolSize() < minMixPeers:
+        debug "mix node pool below threshold, performing targeted lookup",
+          currentPoolSize = wk.getMixNodePoolSize(), threshold = minMixPeers
+        let found = (await wk.lookupMixPeers()).valueOr:
+          warn "targeted mix peer lookup failed", error = error
+          0
+        if found > 0:
+          info "found mix peers via targeted kademlia lookup", count = found
+
+      await sleepAsync(interval)
+  except CancelledError as e:
+    debug "extended kademlia discovery loop cancelled", error = e.msg
+  except CatchableError as e:
+    error "extended kademlia discovery loop failed", error = e.msg
+
+proc start*(
+    wk: WakuKademlia,
+    interval: Duration = DefaultExtendedKademliaDiscoveryInterval,
+    minMixPeers: int = 0,
+): Future[Result[void, string]] {.async: (raises: []).} =
+  if wk.running:
+    return err("already running")
+
+  try:
+    await wk.protocol.start()
+  except CatchableError as e:
+    return err("failed to start kademlia discovery: " & e.msg)
+
+  # Mark the instance as running BEFORE spawning the loop; without this,
+  # stop() early-returns on `if not wk.running` and never cancels the
+  # discovery loop nor stops the kademlia protocol. (Replaces an adjacent
+  # blank line so the hunk's +1,280 line count is preserved.)
+  wk.running = true
+  wk.discoveryLoop = wk.runDiscoveryLoop(interval, minMixPeers)
+  info "kademlia discovery started"
+  return ok()
+
+proc stop*(wk: WakuKademlia) {.async: (raises: []).} =
+  if not wk.running:
+    return
+
+  info "Stopping kademlia discovery"
+
+  wk.running = false
+
+  if not wk.discoveryLoop.isNil():
+    await wk.discoveryLoop.cancelAndWait()
+    wk.discoveryLoop = nil
+
+  if not wk.protocol.isNil():
+    await wk.protocol.stop()
+  info "Successfully stopped kademlia discovery"
diff --git a/waku/events/delivery_events.nim b/waku/events/delivery_events.nim
new file mode 100644
index 000000000..f8eb0f48d
--- /dev/null
+++ b/waku/events/delivery_events.nim
@@ -0,0 +1,27 @@
+import waku/waku_core/[message/message, message/digest], waku/common/broker/event_broker
+
+type DeliveryDirection* {.pure.} = enum
+  PUBLISHING
+  RECEIVING
+
+type DeliverySuccess* {.pure.}
= enum + SUCCESSFUL + UNSUCCESSFUL + +EventBroker: + type DeliveryFeedbackEvent* = ref object + success*: DeliverySuccess + dir*: DeliveryDirection + comment*: string + msgHash*: WakuMessageHash + msg*: WakuMessage + +EventBroker: + type OnFilterSubscribeEvent* = object + pubsubTopic*: string + contentTopics*: seq[string] + +EventBroker: + type OnFilterUnSubscribeEvent* = object + pubsubTopic*: string + contentTopics*: seq[string] diff --git a/waku/events/events.nim b/waku/events/events.nim new file mode 100644 index 000000000..46dd4fdd3 --- /dev/null +++ b/waku/events/events.nim @@ -0,0 +1,3 @@ +import ./[message_events, delivery_events, health_events, peer_events] + +export message_events, delivery_events, health_events, peer_events diff --git a/waku/events/health_events.nim b/waku/events/health_events.nim new file mode 100644 index 000000000..1e6decedb --- /dev/null +++ b/waku/events/health_events.nim @@ -0,0 +1,27 @@ +import waku/common/broker/event_broker + +import waku/api/types +import waku/node/health_monitor/[protocol_health, topic_health] +import waku/waku_core/topics + +export protocol_health, topic_health + +# Notify health changes to node connectivity +EventBroker: + type EventConnectionStatusChange* = object + connectionStatus*: ConnectionStatus + +# Notify health changes to a subscribed topic +# TODO: emit content topic health change events when subscribe/unsubscribe +# from/to content topic is provided in the new API (so we know which +# content topics are of interest to the application) +EventBroker: + type EventContentTopicHealthChange* = object + contentTopic*: ContentTopic + health*: TopicHealth + +# Notify health changes to a shard (pubsub topic) +EventBroker: + type EventShardTopicHealthChange* = object + topic*: PubsubTopic + health*: TopicHealth diff --git a/waku/events/message_events.nim b/waku/events/message_events.nim new file mode 100644 index 000000000..677a4a433 --- /dev/null +++ b/waku/events/message_events.nim @@ -0,0 +1,34 @@ 
+import waku/[api/types, waku_core/message, waku_core/topics, common/broker/event_broker] + +export types + +EventBroker: + # Event emitted when a message is sent to the network + type MessageSentEvent* = object + requestId*: RequestId + messageHash*: string + +EventBroker: + # Event emitted when a message send operation fails + type MessageErrorEvent* = object + requestId*: RequestId + messageHash*: string + error*: string + +EventBroker: + # Confirmation that a message has been correctly delivered to some neighbouring nodes. + type MessagePropagatedEvent* = object + requestId*: RequestId + messageHash*: string + +EventBroker: + # Event emitted when a message is received via Waku + type MessageReceivedEvent* = object + messageHash*: string + message*: WakuMessage + +EventBroker: + # Internal event emitted when a message arrives from the network via any protocol + type MessageSeenEvent* = object + topic*: PubsubTopic + message*: WakuMessage diff --git a/waku/events/peer_events.nim b/waku/events/peer_events.nim new file mode 100644 index 000000000..49dfa9f9a --- /dev/null +++ b/waku/events/peer_events.nim @@ -0,0 +1,13 @@ +import waku/common/broker/event_broker +import libp2p/switch + +type WakuPeerEventKind* {.pure.} = enum + EventConnected + EventDisconnected + EventIdentified + EventMetadataUpdated + +EventBroker: + type EventWakuPeer* = object + peerId*: PeerId + kind*: WakuPeerEventKind diff --git a/waku/factory/app_callbacks.nim b/waku/factory/app_callbacks.nim index d28b9f2d1..f1d3369be 100644 --- a/waku/factory/app_callbacks.nim +++ b/waku/factory/app_callbacks.nim @@ -1,6 +1,7 @@ -import ../waku_relay, ../node/peer_manager +import ../waku_relay, ../node/peer_manager, ../node/health_monitor/connection_status type AppCallbacks* = ref object relayHandler*: WakuRelayHandler topicHealthChangeHandler*: TopicHealthChangeHandler connectionChangeHandler*: ConnectionChangeHandler + connectionStatusChangeHandler*: ConnectionStatusChangeHandler diff --git 
a/waku/factory/builder.nim b/waku/factory/builder.nim index 772cfbffd..e0b643fc0 100644 --- a/waku/factory/builder.nim +++ b/waku/factory/builder.nim @@ -15,7 +15,8 @@ import ../waku_node, ../node/peer_manager, ../common/rate_limit/setting, - ../common/utils/parse_size_units + ../common/utils/parse_size_units, + ../common/broker/broker_context type WakuNodeBuilder* = object # General @@ -209,6 +210,7 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = maxServicePeers = some(builder.maxServicePeers), colocationLimit = builder.colocationLimit, shardedPeerManagement = builder.shardAware, + maxConnections = builder.switchMaxConnections.get(builders.MaxConnections), ) var node: WakuNode diff --git a/waku/factory/conf_builder/conf_builder.nim b/waku/factory/conf_builder/conf_builder.nim index 37cea76fe..b8d0316c3 100644 --- a/waku/factory/conf_builder/conf_builder.nim +++ b/waku/factory/conf_builder/conf_builder.nim @@ -10,10 +10,12 @@ import ./metrics_server_conf_builder, ./rate_limit_conf_builder, ./rln_relay_conf_builder, - ./mix_conf_builder + ./mix_conf_builder, + ./kademlia_discovery_conf_builder export waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder, store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder, discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder, - rate_limit_conf_builder, rln_relay_conf_builder, mix_conf_builder + rate_limit_conf_builder, rln_relay_conf_builder, mix_conf_builder, + kademlia_discovery_conf_builder diff --git a/waku/factory/conf_builder/filter_service_conf_builder.nim b/waku/factory/conf_builder/filter_service_conf_builder.nim index a3f056b01..0a6617430 100644 --- a/waku/factory/conf_builder/filter_service_conf_builder.nim +++ b/waku/factory/conf_builder/filter_service_conf_builder.nim @@ -22,6 +22,12 @@ proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) = proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: 
uint32) = b.maxPeersToServe = some(maxPeersToServe) +proc withMaxPeersToServeIfNotAssigned*( + b: var FilterServiceConfBuilder, maxPeersToServe: uint32 +) = + if b.maxPeersToServe.isNone(): + b.maxPeersToServe = some(maxPeersToServe) + proc withSubscriptionTimeout*( b: var FilterServiceConfBuilder, subscriptionTimeout: uint16 ) = diff --git a/waku/factory/conf_builder/kademlia_discovery_conf_builder.nim b/waku/factory/conf_builder/kademlia_discovery_conf_builder.nim new file mode 100644 index 000000000..916d71be1 --- /dev/null +++ b/waku/factory/conf_builder/kademlia_discovery_conf_builder.nim @@ -0,0 +1,40 @@ +import chronicles, std/options, results +import libp2p/[peerid, multiaddress, peerinfo] +import waku/factory/waku_conf + +logScope: + topics = "waku conf builder kademlia discovery" + +####################################### +## Kademlia Discovery Config Builder ## +####################################### +type KademliaDiscoveryConfBuilder* = object + enabled*: bool + bootstrapNodes*: seq[string] + +proc init*(T: type KademliaDiscoveryConfBuilder): KademliaDiscoveryConfBuilder = + KademliaDiscoveryConfBuilder() + +proc withEnabled*(b: var KademliaDiscoveryConfBuilder, enabled: bool) = + b.enabled = enabled + +proc withBootstrapNodes*( + b: var KademliaDiscoveryConfBuilder, bootstrapNodes: seq[string] +) = + b.bootstrapNodes = bootstrapNodes + +proc build*( + b: KademliaDiscoveryConfBuilder +): Result[Option[KademliaDiscoveryConf], string] = + # Kademlia is enabled if explicitly enabled OR if bootstrap nodes are provided + let enabled = b.enabled or b.bootstrapNodes.len > 0 + if not enabled: + return ok(none(KademliaDiscoveryConf)) + + var parsedNodes: seq[(PeerId, seq[MultiAddress])] + for nodeStr in b.bootstrapNodes: + let (peerId, ma) = parseFullAddress(nodeStr).valueOr: + return err("Failed to parse kademlia bootstrap node: " & error) + parsedNodes.add((peerId, @[ma])) + + return ok(some(KademliaDiscoveryConf(bootstrapNodes: parsedNodes))) diff --git 
a/waku/factory/conf_builder/rate_limit_conf_builder.nim b/waku/factory/conf_builder/rate_limit_conf_builder.nim index 0d466a132..b2edbef03 100644 --- a/waku/factory/conf_builder/rate_limit_conf_builder.nim +++ b/waku/factory/conf_builder/rate_limit_conf_builder.nim @@ -14,6 +14,12 @@ proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder = proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) = b.strValue = some(rateLimits) +proc withRateLimitsIfNotAssigned*( + b: var RateLimitConfBuilder, rateLimits: seq[string] +) = + if b.strValue.isNone() or b.strValue.get().len == 0: + b.strValue = some(rateLimits) + proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] = if b.strValue.isSome() and b.objValue.isSome(): return err("Rate limits conf must only be set once on the builder") diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim index 645869247..2c427918d 100644 --- a/waku/factory/conf_builder/waku_conf_builder.nim +++ b/waku/factory/conf_builder/waku_conf_builder.nim @@ -12,7 +12,8 @@ import ../networks_config, ../../common/logging, ../../common/utils/parse_size_units, - ../../waku_enr/capabilities + ../../waku_enr/capabilities, + tools/confutils/entry_nodes import ./filter_service_conf_builder, @@ -25,11 +26,14 @@ import ./metrics_server_conf_builder, ./rate_limit_conf_builder, ./rln_relay_conf_builder, - ./mix_conf_builder + ./mix_conf_builder, + ./kademlia_discovery_conf_builder logScope: topics = "waku conf builder" +const DefaultMaxConnections* = 150 + type MaxMessageSizeKind* = enum mmskNone mmskStr @@ -78,6 +82,7 @@ type WakuConfBuilder* = object mixConf*: MixConfBuilder webSocketConf*: WebSocketConfBuilder rateLimitConf*: RateLimitConfBuilder + kademliaDiscoveryConf*: KademliaDiscoveryConfBuilder # End conf builders relay: Option[bool] lightPush: Option[bool] @@ -138,6 +143,7 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder = 
storeServiceConf: StoreServiceConfBuilder.init(), webSocketConf: WebSocketConfBuilder.init(), rateLimitConf: RateLimitConfBuilder.init(), + kademliaDiscoveryConf: KademliaDiscoveryConfBuilder.init(), ) proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) = @@ -248,9 +254,6 @@ proc withAgentString*(b: var WakuConfBuilder, agentString: string) = proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) = b.colocationLimit = some(colocationLimit) -proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) = - b.maxRelayPeers = some(maxRelayPeers) - proc withRelayServiceRatio*(b: var WakuConfBuilder, relayServiceRatio: string) = b.relayServiceRatio = some(relayServiceRatio) @@ -391,6 +394,42 @@ proc applyNetworkConf(builder: var WakuConfBuilder) = discarded = builder.discv5Conf.bootstrapNodes builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes) + if networkConf.enableKadDiscovery: + if not builder.kademliaDiscoveryConf.enabled: + builder.kademliaDiscoveryConf.withEnabled(networkConf.enableKadDiscovery) + + if builder.kademliaDiscoveryConf.bootstrapNodes.len == 0 and + networkConf.kadBootstrapNodes.len > 0: + builder.kademliaDiscoveryConf.withBootstrapNodes(networkConf.kadBootstrapNodes) + + if networkConf.mix: + if builder.mix.isNone: + builder.mix = some(networkConf.mix) + + if builder.p2pReliability.isNone: + builder.withP2pReliability(networkConf.p2pReliability) + + # Process entry nodes from network config - classify and distribute + if networkConf.entryNodes.len > 0: + let processed = processEntryNodes(networkConf.entryNodes) + if processed.isOk(): + let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processed.get() + + # Set ENRTree URLs for DNS discovery + if enrTreeUrls.len > 0: + for url in enrTreeUrls: + builder.dnsDiscoveryConf.withEnrTreeUrl(url) + + # Set ENR records as bootstrap nodes for discv5 + if bootstrapEnrs.len > 0: + builder.discv5Conf.withBootstrapNodes(bootstrapEnrs) + + # Add 
static nodes (multiaddrs and those extracted from ENR entries) + if staticNodesFromEntry.len > 0: + builder.withStaticNodes(staticNodesFromEntry) + else: + warn "Failed to process entry nodes from network conf", error = processed.error() + proc build*( builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng() ): Result[WakuConf, string] = @@ -507,6 +546,9 @@ proc build*( let rateLimit = builder.rateLimitConf.build().valueOr: return err("Rate limits Conf building failed: " & $error) + let kademliaDiscoveryConf = builder.kademliaDiscoveryConf.build().valueOr: + return err("Kademlia Discovery Conf building failed: " & $error) + # End - Build sub-configs let logLevel = @@ -592,11 +634,16 @@ proc build*( if builder.maxConnections.isSome(): builder.maxConnections.get() else: - warn "Max Connections was not specified, defaulting to 300" - 300 + warn "Max connections not specified, defaulting to DefaultMaxConnections", + default = DefaultMaxConnections + DefaultMaxConnections + + if maxConnections < DefaultMaxConnections: + warn "max-connections less than DefaultMaxConnections; we suggest using DefaultMaxConnections or more for better connectivity", + provided = maxConnections, recommended = DefaultMaxConnections # TODO: Do the git version thing here - let agentString = builder.agentString.get("nwaku") + let agentString = builder.agentString.get("logos-delivery") # TODO: use `DefaultColocationLimit`. 
the user of this value should # probably be defining a config object @@ -606,7 +653,7 @@ proc build*( let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false) let wakuFlags = CapabilitiesBitfield.init( - lightpush = lightPush, + lightpush = lightPush and relay, filter = filterServiceConf.isSome, store = storeServiceConf.isSome, relay = relay, @@ -624,6 +671,7 @@ proc build*( restServerConf: restServerConf, dnsDiscoveryConf: dnsDiscoveryConf, mixConf: mixConf, + kademliaDiscoveryConf: kademliaDiscoveryConf, # end confs nodeKey: nodeKey, clusterId: clusterId, @@ -663,7 +711,7 @@ proc build*( agentString: agentString, colocationLimit: colocationLimit, maxRelayPeers: builder.maxRelayPeers, - relayServiceRatio: builder.relayServiceRatio.get("60:40"), + relayServiceRatio: builder.relayServiceRatio.get("50:50"), rateLimit: rateLimit, circuitRelayClient: builder.circuitRelayClient.get(false), staticNodes: builder.staticNodes, diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim index c7193aa9c..94856fb21 100644 --- a/waku/factory/networks_config.nim +++ b/waku/factory/networks_config.nim @@ -29,6 +29,11 @@ type NetworkConf* = object shardingConf*: ShardingConf discv5Discovery*: bool discv5BootstrapNodes*: seq[string] + enableKadDiscovery*: bool + kadBootstrapNodes*: seq[string] + entryNodes*: seq[string] + mix*: bool + p2pReliability*: bool # cluster-id=1 (aka The Waku Network) # Cluster configuration corresponding to The Waku Network. 
Note that it @@ -45,6 +50,11 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf = rlnEpochSizeSec: 600, rlnRelayUserMessageLimit: 100, shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8), + enableKadDiscovery: false, + kadBootstrapNodes: @[], + entryNodes: @[], + mix: false, + p2pReliability: false, discv5Discovery: true, discv5BootstrapNodes: @[ @@ -54,6 +64,36 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf = ], ) +# cluster-id=2 (Logos Dev Network) +# Cluster configuration for the Logos Dev Network. +proc LogosDevConf*(T: type NetworkConf): NetworkConf = + const ZeroChainId = 0'u256 + return NetworkConf( + maxMessageSize: "150KiB", + clusterId: 2, + rlnRelay: false, + rlnRelayEthContractAddress: "", + rlnRelayDynamic: false, + rlnRelayChainId: ZeroChainId, + rlnEpochSizeSec: 0, + rlnRelayUserMessageLimit: 0, + shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8), + enableKadDiscovery: true, + mix: true, + p2pReliability: true, + discv5Discovery: true, + discv5BootstrapNodes: @[], + entryNodes: + @[ + "/dns4/delivery-01.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmTUbnxLGT9JvV6mu9oPyDjqHK4Phs1VDJNUgESgNSkuby", + "/dns4/delivery-02.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmMK7PYygBtKUQ8EHp7EfaD3bCEsJrkFooK8RQ2PVpJprH", + "/dns4/delivery-01.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm4S1JYkuzDKLKQvwgAhZKs9otxXqt8SCGtB4hoJP1S397", + "/dns4/delivery-02.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8Y9kgBNtjxvCnf1X6gnZJW5EGE4UwwCL3CCm55TwqBiH", + "/dns4/delivery-01.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8YokiNun9BkeA1ZRmhLbtNUvcwRr64F69tYj9fkGyuEP", + "/dns4/delivery-02.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAkvwhGHKNry6LACrB8TmEFoCJKEX29XR5dDUzk3UT3UNSE", + ], + ) + proc validateShards*( shardingConf: ShardingConf, shards: seq[uint16] ): Result[void, string] = diff --git a/waku/factory/node_factory.nim 
b/waku/factory/node_factory.nim index 488d07c06..2f82440f6 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -6,7 +6,8 @@ import libp2p/protocols/pubsub/gossipsub, libp2p/protocols/connectivity/relay/relay, libp2p/nameresolving/dnsresolver, - libp2p/crypto/crypto + libp2p/crypto/crypto, + libp2p/crypto/curve25519 import ./internal_config, @@ -32,6 +33,7 @@ import ../waku_store_legacy/common as legacy_common, ../waku_filter_v2, ../waku_peer_exchange, + ../discovery/waku_kademlia, ../node/peer_manager, ../node/peer_manager/peer_store/waku_peer_storage, ../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations, @@ -163,6 +165,38 @@ proc setupProtocols( error "Unrecoverable error occurred", error = msg quit(QuitFailure) + #mount mix + if conf.mixConf.isSome(): + let mixConf = conf.mixConf.get() + (await node.mountMix(conf.clusterId, mixConf.mixKey, mixConf.mixnodes)).isOkOr: + return err("failed to mount waku mix protocol: " & $error) + + # Setup extended kademlia discovery + if conf.kademliaDiscoveryConf.isSome(): + let mixPubKey = + if conf.mixConf.isSome(): + some(conf.mixConf.get().mixPubKey) + else: + none(Curve25519Key) + + node.wakuKademlia = WakuKademlia.new( + node.switch, + ExtendedKademliaDiscoveryParams( + bootstrapNodes: conf.kademliaDiscoveryConf.get().bootstrapNodes, + mixPubKey: mixPubKey, + advertiseMix: conf.mixConf.isSome(), + ), + node.peerManager, + getMixNodePoolSize = proc(): int {.gcsafe, raises: [].} = + if node.wakuMix.isNil(): + 0 + else: + node.getMixNodePoolSize(), + isNodeStarted = proc(): bool {.gcsafe, raises: [].} = + node.started, + ).valueOr: + return err("failed to setup kademlia discovery: " & error) + if conf.storeServiceConf.isSome(): let storeServiceConf = conf.storeServiceConf.get() if storeServiceConf.supportV2: @@ -327,9 +361,9 @@ proc setupProtocols( protectedShard = shardKey.shard, publicKey = shardKey.key node.wakuRelay.addSignedShardsValidator(subscribedProtectedShards, 
conf.clusterId) - # Only relay nodes should be rendezvous points. - if conf.rendezvous: - await node.mountRendezvous(conf.clusterId) + if conf.rendezvous: + await node.mountRendezvous(conf.clusterId, shards) + await node.mountRendezvousClient(conf.clusterId) # Keepalive mounted on all nodes try: @@ -359,8 +393,11 @@ proc setupProtocols( # NOTE Must be mounted after relay if conf.lightPush: try: - await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) - await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) + (await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr: + return err("failed to mount waku lightpush protocol: " & $error) + + (await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr: + return err("failed to mount waku legacy lightpush protocol: " & $error) except CatchableError: return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg()) @@ -414,14 +451,6 @@ proc setupProtocols( if conf.peerExchangeDiscovery: await node.mountPeerExchangeClient() - #mount mix - if conf.mixConf.isSome(): - ( - await node.mountMix( - conf.clusterId, conf.mixConf.get().mixKey, conf.mixConf.get().mixnodes - ) - ).isOkOr: - return err("failed to mount waku mix protocol: " & $error) return ok() ## Start node @@ -473,6 +502,11 @@ proc startNode*( if conf.relay: node.peerManager.start() + if not node.wakuKademlia.isNil(): + let minMixPeers = if conf.mixConf.isSome(): 4 else: 0 + (await node.wakuKademlia.start(minMixPeers = minMixPeers)).isOkOr: + return err("failed to start kademlia discovery: " & error) + return ok() proc setupNode*( diff --git a/waku/factory/waku.nim b/waku/factory/waku.nim index bed8a9137..dbee8d093 100644 --- a/waku/factory/waku.nim +++ b/waku/factory/waku.nim @@ -13,38 +13,42 @@ import libp2p/services/autorelayservice, libp2p/services/hpservice, libp2p/peerid, - libp2p/discovery/discoverymngr, - libp2p/discovery/rendezvousinterface, eth/keys, 
eth/p2p/discoveryv5/enr, presto, metrics, - metrics/chronos_httpserver -import - ../common/logging, - ../waku_core, - ../waku_node, - ../node/peer_manager, - ../node/health_monitor, - ../node/waku_metrics, - ../node/delivery_monitor/delivery_monitor, - ../rest_api/message_cache, - ../rest_api/endpoint/server, - ../rest_api/endpoint/builder as rest_server_builder, - ../waku_archive, - ../waku_relay/protocol, - ../discovery/waku_dnsdisc, - ../discovery/waku_discv5, - ../discovery/autonat_service, - ../waku_enr/sharding, - ../waku_rln_relay, - ../waku_store, - ../waku_filter_v2, - ../factory/node_factory, - ../factory/internal_config, - ../factory/app_callbacks, - ../waku_enr/multiaddr, - ./waku_conf + metrics/chronos_httpserver, + waku/[ + waku_core, + waku_node, + waku_archive, + waku_rln_relay, + waku_store, + waku_filter_v2, + waku_relay/protocol, + waku_enr/sharding, + waku_enr/multiaddr, + api/types, + common/logging, + common/broker/broker_context, + node/peer_manager, + node/health_monitor, + node/waku_metrics, + node/delivery_service/delivery_service, + node/delivery_service/subscription_manager, + rest_api/message_cache, + rest_api/endpoint/server, + rest_api/endpoint/builder as rest_server_builder, + discovery/waku_dnsdisc, + discovery/waku_discv5, + discovery/autonat_service, + requests/health_requests, + factory/node_factory, + factory/internal_config, + factory/app_callbacks, + ], + ./waku_conf, + ./waku_state_info logScope: topics = "wakunode waku" @@ -53,7 +57,7 @@ logScope: const git_version* {.strdefine.} = "n/a" type Waku* = ref object - version: string + stateInfo*: WakuStateInfo conf*: WakuConf rng*: ref HmacDrbgContext @@ -63,20 +67,18 @@ type Waku* = ref object dynamicBootstrapNodes*: seq[RemotePeerInfo] dnsRetryLoopHandle: Future[void] networkConnLoopHandle: Future[void] - discoveryMngr: DiscoveryManager node*: WakuNode healthMonitor*: NodeHealthMonitor - deliveryMonitor: DeliveryMonitor + deliveryService*: DeliveryService restServer*: 
WakuRestServerRef metricsServer*: MetricsHttpServerRef appCallbacks*: AppCallbacks -func version*(waku: Waku): string = - waku.version + brokerCtx*: BrokerContext proc setupSwitchServices( waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext @@ -116,7 +118,10 @@ proc newCircuitRelay(isRelayClient: bool): Relay = return Relay.new() proc setupAppCallbacks( - node: WakuNode, conf: WakuConf, appCallbacks: AppCallbacks + node: WakuNode, + conf: WakuConf, + appCallbacks: AppCallbacks, + healthMonitor: NodeHealthMonitor, ): Result[void, string] = if appCallbacks.isNil(): info "No external callbacks to be set" @@ -157,19 +162,33 @@ proc setupAppCallbacks( err("Cannot configure connectionChangeHandler callback with empty peer manager") node.peerManager.onConnectionChange = appCallbacks.connectionChangeHandler + if not appCallbacks.connectionStatusChangeHandler.isNil(): + if healthMonitor.isNil(): + return + err("Cannot configure connectionStatusChangeHandler with empty health monitor") + + healthMonitor.onConnectionStatusChange = appCallbacks.connectionStatusChangeHandler + return ok() proc new*( T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil ): Future[Result[Waku, string]] {.async.} = let rng = crypto.newRng() + let brokerCtx = globalBrokerContext() logging.setupLog(wakuConf.logLevel, wakuConf.logFormat) ?wakuConf.validate() wakuConf.logConf() - let healthMonitor = NodeHealthMonitor.new(wakuConf.dnsAddrsNameServers) + let relay = newCircuitRelay(wakuConf.circuitRelayClient) + + let node = (await setupNode(wakuConf, rng, relay)).valueOr: + error "Failed setting up node", error = $error + return err("Failed setting up node: " & $error) + + let healthMonitor = NodeHealthMonitor.new(node, wakuConf.dnsAddrsNameServers) let restServer: WakuRestServerRef = if wakuConf.restServerConf.isSome(): @@ -183,44 +202,28 @@ proc new*( else: nil - var relay = newCircuitRelay(wakuConf.circuitRelayClient) + # Set the extMultiAddrsOnly flag so the node 
knows not to replace explicit addresses + node.extMultiAddrsOnly = wakuConf.endpointConf.extMultiAddrsOnly - let node = (await setupNode(wakuConf, rng, relay)).valueOr: - error "Failed setting up node", error = $error - return err("Failed setting up node: " & $error) - - healthMonitor.setNodeToHealthMonitor(node) - healthMonitor.onlineMonitor.setPeerStoreToOnlineMonitor(node.switch.peerStore) - healthMonitor.onlineMonitor.addOnlineStateObserver( - node.peerManager.getOnlineStateObserver() - ) - - node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr: + node.setupAppCallbacks(wakuConf, appCallbacks, healthMonitor).isOkOr: error "Failed setting up app callbacks", error = error return err("Failed setting up app callbacks: " & $error) ## Delivery Monitor - var deliveryMonitor: DeliveryMonitor - if wakuConf.p2pReliability: - if wakuConf.remoteStoreNode.isNone(): - return err("A storenode should be set when reliability mode is on") - - let deliveryMonitor = DeliveryMonitor.new( - node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient, - node.wakuFilterClient, - ).valueOr: - return err("could not create delivery monitor: " & $error) + let deliveryService = DeliveryService.new(wakuConf.p2pReliability, node).valueOr: + return err("could not create delivery service: " & $error) var waku = Waku( - version: git_version, + stateInfo: WakuStateInfo.init(node), conf: wakuConf, rng: rng, key: wakuConf.nodeKey, node: node, healthMonitor: healthMonitor, - deliveryMonitor: deliveryMonitor, + deliveryService: deliveryService, appCallbacks: appCallbacks, restServer: restServer, + brokerCtx: brokerCtx, ) waku.setupSwitchServices(wakuConf, relay, rng) @@ -356,7 +359,7 @@ proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg() return -proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = +proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async: 
(raises: []).} = if waku[].node.started: warn "startWaku: waku node already started" return ok() @@ -366,9 +369,15 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = if conf.dnsDiscoveryConf.isSome(): let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() - let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( - dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers - ) + let dynamicBootstrapNodesRes = + try: + await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) + except CatchableError as exc: + Result[seq[RemotePeerInfo], string].err( + "Retrieving dynamic bootstrap nodes failed: " & exc.msg + ) if dynamicBootstrapNodesRes.isErr(): error "Retrieving dynamic bootstrap nodes failed", @@ -382,8 +391,11 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = return err("error while calling startNode: " & $error) ## Update waku data that is set dynamically on node start - (await updateWaku(waku)).isOkOr: - return err("Error in updateApp: " & $error) + try: + (await updateWaku(waku)).isOkOr: + return err("Error in updateApp: " & $error) + except CatchableError: + return err("Caught exception in updateApp: " & getCurrentExceptionMsg()) ## Discv5 if conf.discv5Conf.isSome(): @@ -403,13 +415,56 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = return err("failed to start waku discovery v5: " & $error) ## Reliability - if not waku[].deliveryMonitor.isNil(): - waku[].deliveryMonitor.startDeliveryMonitor() + if not waku[].deliveryService.isNil(): + waku[].deliveryService.startDeliveryService() ## Health Monitor waku[].healthMonitor.startHealthMonitor().isOkOr: return err("failed to start health monitor: " & $error) + ## Setup RequestConnectionStatus provider + + RequestConnectionStatus.setProvider( + globalBrokerContext(), + proc(): Result[RequestConnectionStatus, string] = + try: + let healthReport = 
waku[].healthMonitor.getSyncNodeHealthReport() + return + ok(RequestConnectionStatus(connectionStatus: healthReport.connectionStatus)) + except CatchableError: + err("Failed to read health report: " & getCurrentExceptionMsg()), + ).isOkOr: + error "Failed to set RequestConnectionStatus provider", error = error + + ## Setup RequestProtocolHealth provider + + RequestProtocolHealth.setProvider( + globalBrokerContext(), + proc( + protocol: WakuProtocol + ): Future[Result[RequestProtocolHealth, string]] {.async.} = + try: + let protocolHealthStatus = + await waku[].healthMonitor.getProtocolHealthInfo(protocol) + return ok(RequestProtocolHealth(healthStatus: protocolHealthStatus)) + except CatchableError: + return err("Failed to get protocol health: " & getCurrentExceptionMsg()), + ).isOkOr: + error "Failed to set RequestProtocolHealth provider", error = error + + ## Setup RequestHealthReport provider + + RequestHealthReport.setProvider( + globalBrokerContext(), + proc(): Future[Result[RequestHealthReport, string]] {.async.} = + try: + let report = await waku[].healthMonitor.getNodeHealthReport() + return ok(RequestHealthReport(healthReport: report)) + except CatchableError: + return err("Failed to get health report: " & getCurrentExceptionMsg()), + ).isOkOr: + error "Failed to set RequestHealthReport provider", error = error + if conf.restServerConf.isSome(): rest_server_builder.startRestServerProtocolSupport( waku[].restServer, @@ -425,41 +480,69 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = return err ("Starting protocols support REST server failed: " & $error) if conf.metricsServerConf.isSome(): - waku[].metricsServer = ( - await ( - waku_metrics.startMetricsServerAndLogging( - conf.metricsServerConf.get(), conf.portsShift + try: + waku[].metricsServer = ( + await ( + waku_metrics.startMetricsServerAndLogging( + conf.metricsServerConf.get(), conf.portsShift + ) ) + ).valueOr: + return err("Starting monitoring and external interfaces 
failed: " & error) + except CatchableError: + return err( + "Caught exception starting monitoring and external interfaces failed: " & + getCurrentExceptionMsg() ) - ).valueOr: - return err("Starting monitoring and external interfaces failed: " & error) - waku[].healthMonitor.setOverallHealth(HealthStatus.READY) return ok() -proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} = +proc stop*(waku: Waku): Future[Result[void, string]] {.async: (raises: []).} = ## Waku shutdown if not waku.node.started: warn "stop: attempting to stop node that isn't running" - waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) + try: + waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) - if not waku.metricsServer.isNil(): - await waku.metricsServer.stop() + if not waku.metricsServer.isNil(): + await waku.metricsServer.stop() - if not waku.wakuDiscv5.isNil(): - await waku.wakuDiscv5.stop() + if not waku.wakuDiscv5.isNil(): + await waku.wakuDiscv5.stop() - if not waku.node.isNil(): - await waku.node.stop() + if not waku.deliveryService.isNil(): + await waku.deliveryService.stopDeliveryService() + waku.deliveryService = nil - if not waku.dnsRetryLoopHandle.isNil(): - await waku.dnsRetryLoopHandle.cancelAndWait() + if not waku.node.isNil(): + await waku.node.stop() - if not waku.healthMonitor.isNil(): - await waku.healthMonitor.stopHealthMonitor() + if not waku.dnsRetryLoopHandle.isNil(): + await waku.dnsRetryLoopHandle.cancelAndWait() - if not waku.restServer.isNil(): - await waku.restServer.stop() + if not waku.healthMonitor.isNil(): + await waku.healthMonitor.stopHealthMonitor() + + ## Clear RequestConnectionStatus provider + RequestConnectionStatus.clearProvider(waku.brokerCtx) + + if not waku.restServer.isNil(): + await waku.restServer.stop() + except Exception: + error "waku stop failed: " & getCurrentExceptionMsg() + return err("waku stop failed: " & getCurrentExceptionMsg()) + + return ok() + +proc isModeCoreAvailable*(waku: Waku): bool = + 
return not waku.node.wakuRelay.isNil() + +proc isModeEdgeAvailable*(waku: Waku): bool = + return + waku.node.wakuRelay.isNil() and not waku.node.wakuStoreClient.isNil() and + not waku.node.wakuFilterClient.isNil() and not waku.node.wakuLightPushClient.isNil() + +{.pop.} diff --git a/waku/factory/waku_conf.nim b/waku/factory/waku_conf.nim index 89ffb366c..01574d067 100644 --- a/waku/factory/waku_conf.nim +++ b/waku/factory/waku_conf.nim @@ -4,6 +4,7 @@ import libp2p/crypto/crypto, libp2p/multiaddress, libp2p/crypto/curve25519, + libp2p/peerid, secp256k1, results @@ -51,6 +52,10 @@ type MixConf* = ref object mixPubKey*: Curve25519Key mixnodes*: seq[MixNodePubInfo] +type KademliaDiscoveryConf* = object + bootstrapNodes*: seq[(PeerId, seq[MultiAddress])] + ## Bootstrap nodes for extended kademlia discovery. + type StoreServiceConf* {.requiresInit.} = object dbMigration*: bool dbURl*: string @@ -109,6 +114,7 @@ type WakuConf* {.requiresInit.} = ref object metricsServerConf*: Option[MetricsServerConf] webSocketConf*: Option[WebSocketConf] mixConf*: Option[MixConf] + kademliaDiscoveryConf*: Option[KademliaDiscoveryConf] portsShift*: uint16 dnsAddrsNameServers*: seq[IpAddress] @@ -154,7 +160,8 @@ proc logConf*(conf: WakuConf) = store = conf.storeServiceConf.isSome(), filter = conf.filterServiceConf.isSome(), lightPush = conf.lightPush, - peerExchange = conf.peerExchangeService + peerExchange = conf.peerExchangeService, + rendezvous = conf.rendezvous info "Configuration. Network", cluster = conf.clusterId diff --git a/waku/factory/waku_state_info.nim b/waku/factory/waku_state_info.nim new file mode 100644 index 000000000..5dc72a693 --- /dev/null +++ b/waku/factory/waku_state_info.nim @@ -0,0 +1,50 @@ +## This module is aimed to collect and provide information about the state of the node, +## such as its version, metrics values, etc. 
+## It has been originally designed to be used by the debug API, which acts as a consumer of +## this information, but any other module can populate the information it needs to be +## accessible through the debug API. + +import std/[tables, sequtils, strutils] +import metrics, eth/p2p/discoveryv5/enr, libp2p/peerid +import waku/waku_node + +type + NodeInfoId* {.pure.} = enum + Version + Metrics + MyMultiaddresses + MyENR + MyPeerId + + WakuStateInfo* {.requiresInit.} = object + node: WakuNode + +proc getAllPossibleInfoItemIds*(self: WakuStateInfo): seq[NodeInfoId] = + ## Returns all possible options that can be queried to learn about the node's information. + var ret = newSeq[NodeInfoId](0) + for item in NodeInfoId: + ret.add(item) + return ret + +proc getMetrics(): string = + {.gcsafe.}: + return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module + +proc getNodeInfoItem*(self: WakuStateInfo, infoItemId: NodeInfoId): string = + ## Returns the content of the info item with the given id if it exists. 
+ case infoItemId + of NodeInfoId.Version: + return git_version + of NodeInfoId.Metrics: + return getMetrics() + of NodeInfoId.MyMultiaddresses: + return self.node.info().listenAddresses.join(",") + of NodeInfoId.MyENR: + return self.node.enr.toURI() + of NodeInfoId.MyPeerId: + return $PeerId(self.node.peerId()) + else: + return "unknown info item id" + +proc init*(T: typedesc[WakuStateInfo], node: WakuNode): T = + return WakuStateInfo(node: node) diff --git a/waku/node/delivery_monitor/delivery_callback.nim b/waku/node/delivery_monitor/delivery_callback.nim deleted file mode 100644 index c996bc7b0..000000000 --- a/waku/node/delivery_monitor/delivery_callback.nim +++ /dev/null @@ -1,17 +0,0 @@ -import ../../waku_core - -type DeliveryDirection* {.pure.} = enum - PUBLISHING - RECEIVING - -type DeliverySuccess* {.pure.} = enum - SUCCESSFUL - UNSUCCESSFUL - -type DeliveryFeedbackCallback* = proc( - success: DeliverySuccess, - dir: DeliveryDirection, - comment: string, - msgHash: WakuMessageHash, - msg: WakuMessage, -) {.gcsafe, raises: [].} diff --git a/waku/node/delivery_monitor/delivery_monitor.nim b/waku/node/delivery_monitor/delivery_monitor.nim deleted file mode 100644 index 4dda542cc..000000000 --- a/waku/node/delivery_monitor/delivery_monitor.nim +++ /dev/null @@ -1,43 +0,0 @@ -## This module helps to ensure the correct transmission and reception of messages - -import results -import chronos -import - ./recv_monitor, - ./send_monitor, - ./delivery_callback, - ../../waku_core, - ../../waku_store/client, - ../../waku_relay/protocol, - ../../waku_lightpush/client, - ../../waku_filter_v2/client - -type DeliveryMonitor* = ref object - sendMonitor: SendMonitor - recvMonitor: RecvMonitor - -proc new*( - T: type DeliveryMonitor, - storeClient: WakuStoreClient, - wakuRelay: protocol.WakuRelay, - wakuLightpushClient: WakuLightpushClient, - wakuFilterClient: WakuFilterClient, -): Result[T, string] = - ## storeClient is needed to give store visitility to DeliveryMonitor - 
## wakuRelay and wakuLightpushClient are needed to give a mechanism to SendMonitor to re-publish - let sendMonitor = ?SendMonitor.new(storeClient, wakuRelay, wakuLightpushClient) - let recvMonitor = RecvMonitor.new(storeClient, wakuFilterClient) - return ok(DeliveryMonitor(sendMonitor: sendMonitor, recvMonitor: recvMonitor)) - -proc startDeliveryMonitor*(self: DeliveryMonitor) = - self.sendMonitor.startSendMonitor() - self.recvMonitor.startRecvMonitor() - -proc stopDeliveryMonitor*(self: DeliveryMonitor) {.async.} = - self.sendMonitor.stopSendMonitor() - await self.recvMonitor.stopRecvMonitor() - -proc setDeliveryCallback*(self: DeliveryMonitor, deliveryCb: DeliveryFeedbackCallback) = - ## The deliveryCb is a proc defined by the api client so that it can get delivery feedback - self.sendMonitor.setDeliveryCallback(deliveryCb) - self.recvMonitor.setDeliveryCallback(deliveryCb) diff --git a/waku/node/delivery_monitor/publish_observer.nim b/waku/node/delivery_monitor/publish_observer.nim deleted file mode 100644 index 1f517f8bd..000000000 --- a/waku/node/delivery_monitor/publish_observer.nim +++ /dev/null @@ -1,9 +0,0 @@ -import chronicles -import ../../waku_core/message/message - -type PublishObserver* = ref object of RootObj - -method onMessagePublished*( - self: PublishObserver, pubsubTopic: string, message: WakuMessage -) {.base, gcsafe, raises: [].} = - error "onMessagePublished not implemented" diff --git a/waku/node/delivery_monitor/send_monitor.nim b/waku/node/delivery_monitor/send_monitor.nim deleted file mode 100644 index 15b16065f..000000000 --- a/waku/node/delivery_monitor/send_monitor.nim +++ /dev/null @@ -1,212 +0,0 @@ -## This module reinforces the publish operation with regular store-v3 requests. 
-## - -import std/[sequtils, tables] -import chronos, chronicles, libp2p/utility -import - ./delivery_callback, - ./publish_observer, - ../../waku_core, - ./not_delivered_storage/not_delivered_storage, - ../../waku_store/[client, common], - ../../waku_archive/archive, - ../../waku_relay/protocol, - ../../waku_lightpush/client - -const MaxTimeInCache* = chronos.minutes(1) - ## Messages older than this time will get completely forgotten on publication and a - ## feedback will be given when that happens - -const SendCheckInterval* = chronos.seconds(3) - ## Interval at which we check that messages have been properly received by a store node - -const MaxMessagesToCheckAtOnce = 100 - ## Max number of messages to check if they were properly archived by a store node - -const ArchiveTime = chronos.seconds(3) - ## Estimation of the time we wait until we start confirming that a message has been properly - ## received and archived by a store node - -type DeliveryInfo = object - pubsubTopic: string - msg: WakuMessage - -type SendMonitor* = ref object of PublishObserver - publishedMessages: Table[WakuMessageHash, DeliveryInfo] - ## Cache that contains the delivery info per message hash. - ## This is needed to make sure the published messages are properly published - - msgStoredCheckerHandle: Future[void] ## handle that allows to stop the async task - - notDeliveredStorage: NotDeliveredStorage - ## NOTE: this is not fully used because that might be tackled by higher abstraction layers - - storeClient: WakuStoreClient - deliveryCb: DeliveryFeedbackCallback - - wakuRelay: protocol.WakuRelay - wakuLightpushClient: WakuLightPushClient - -proc new*( - T: type SendMonitor, - storeClient: WakuStoreClient, - wakuRelay: protocol.WakuRelay, - wakuLightpushClient: WakuLightPushClient, -): Result[T, string] = - if wakuRelay.isNil() and wakuLightpushClient.isNil(): - return err( - "Could not create SendMonitor. 
wakuRelay or wakuLightpushClient should be set" - ) - - let notDeliveredStorage = ?NotDeliveredStorage.new() - - let sendMonitor = SendMonitor( - notDeliveredStorage: notDeliveredStorage, - storeClient: storeClient, - wakuRelay: wakuRelay, - wakuLightpushClient: wakuLightPushClient, - ) - - if not wakuRelay.isNil(): - wakuRelay.addPublishObserver(sendMonitor) - - if not wakuLightpushClient.isNil(): - wakuLightpushClient.addPublishObserver(sendMonitor) - - return ok(sendMonitor) - -proc performFeedbackAndCleanup( - self: SendMonitor, - msgsToDiscard: Table[WakuMessageHash, DeliveryInfo], - success: DeliverySuccess, - dir: DeliveryDirection, - comment: string, -) = - ## This procs allows to bring delivery feedback to the API client - ## It requires a 'deliveryCb' to be registered beforehand. - if self.deliveryCb.isNil(): - error "deliveryCb is nil in performFeedbackAndCleanup", - success, dir, comment, hashes = toSeq(msgsToDiscard.keys).mapIt(shortLog(it)) - return - - for hash, deliveryInfo in msgsToDiscard: - info "send monitor performFeedbackAndCleanup", - success, dir, comment, msg_hash = shortLog(hash) - - self.deliveryCb(success, dir, comment, hash, deliveryInfo.msg) - self.publishedMessages.del(hash) - -proc checkMsgsInStore( - self: SendMonitor, msgsToValidate: Table[WakuMessageHash, DeliveryInfo] -): Future[ - Result[ - tuple[ - publishedCorrectly: Table[WakuMessageHash, DeliveryInfo], - notYetPublished: Table[WakuMessageHash, DeliveryInfo], - ], - void, - ] -] {.async.} = - let hashesToValidate = toSeq(msgsToValidate.keys) - - let storeResp: StoreQueryResponse = ( - await self.storeClient.queryToAny( - StoreQueryRequest(includeData: false, messageHashes: hashesToValidate) - ) - ).valueOr: - error "checkMsgsInStore failed to get remote msgHashes", - hashes = hashesToValidate.mapIt(shortLog(it)), error = $error - return err() - - let publishedHashes = storeResp.messages.mapIt(it.messageHash) - - var notYetPublished: Table[WakuMessageHash, DeliveryInfo] - var 
publishedCorrectly: Table[WakuMessageHash, DeliveryInfo] - - for msgHash, deliveryInfo in msgsToValidate.pairs: - if publishedHashes.contains(msgHash): - publishedCorrectly[msgHash] = deliveryInfo - self.publishedMessages.del(msgHash) ## we will no longer track that message - else: - notYetPublished[msgHash] = deliveryInfo - - return ok((publishedCorrectly: publishedCorrectly, notYetPublished: notYetPublished)) - -proc processMessages(self: SendMonitor) {.async.} = - var msgsToValidate: Table[WakuMessageHash, DeliveryInfo] - var msgsToDiscard: Table[WakuMessageHash, DeliveryInfo] - - let now = getNowInNanosecondTime() - let timeToCheckThreshold = now - ArchiveTime.nanos - let maxLifeTime = now - MaxTimeInCache.nanos - - for hash, deliveryInfo in self.publishedMessages.pairs: - if deliveryInfo.msg.timestamp < maxLifeTime: - ## message is too old - msgsToDiscard[hash] = deliveryInfo - - if deliveryInfo.msg.timestamp < timeToCheckThreshold: - msgsToValidate[hash] = deliveryInfo - - ## Discard the messages that are too old - self.performFeedbackAndCleanup( - msgsToDiscard, DeliverySuccess.UNSUCCESSFUL, DeliveryDirection.PUBLISHING, - "Could not publish messages. 
Please try again.", - ) - - let (publishedCorrectly, notYetPublished) = ( - await self.checkMsgsInStore(msgsToValidate) - ).valueOr: - return ## the error log is printed in checkMsgsInStore - - ## Give positive feedback for the correctly published messages - self.performFeedbackAndCleanup( - publishedCorrectly, DeliverySuccess.SUCCESSFUL, DeliveryDirection.PUBLISHING, - "messages published correctly", - ) - - ## Try to publish again - for msgHash, deliveryInfo in notYetPublished.pairs: - let pubsubTopic = deliveryInfo.pubsubTopic - let msg = deliveryInfo.msg - if not self.wakuRelay.isNil(): - info "trying to publish again with wakuRelay", msgHash, pubsubTopic - (await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr: - error "could not publish with wakuRelay.publish", - msgHash, pubsubTopic, error = $error - continue - - if not self.wakuLightpushClient.isNil(): - info "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic - (await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr: - error "could not publish with publishToAny", error = $error - continue - -proc checkIfMessagesStored(self: SendMonitor) {.async.} = - ## Continuously monitors that the sent messages have been received by a store node - while true: - await self.processMessages() - await sleepAsync(SendCheckInterval) - -method onMessagePublished( - self: SendMonitor, pubsubTopic: string, msg: WakuMessage -) {.gcsafe, raises: [].} = - ## Implementation of the PublishObserver interface. - ## - ## When publishing a message either through relay or lightpush, we want to add some extra effort - ## to make sure it is received to one store node. Hence, keep track of those published messages. 
- - info "onMessagePublished" - let msgHash = computeMessageHash(pubSubTopic, msg) - - if not self.publishedMessages.hasKey(msgHash): - self.publishedMessages[msgHash] = DeliveryInfo(pubsubTopic: pubsubTopic, msg: msg) - -proc startSendMonitor*(self: SendMonitor) = - self.msgStoredCheckerHandle = self.checkIfMessagesStored() - -proc stopSendMonitor*(self: SendMonitor) = - discard self.msgStoredCheckerHandle.cancelAndWait() - -proc setDeliveryCallback*(self: SendMonitor, deliveryCb: DeliveryFeedbackCallback) = - self.deliveryCb = deliveryCb diff --git a/waku/node/delivery_monitor/subscriptions_observer.nim b/waku/node/delivery_monitor/subscriptions_observer.nim deleted file mode 100644 index 800117ae9..000000000 --- a/waku/node/delivery_monitor/subscriptions_observer.nim +++ /dev/null @@ -1,13 +0,0 @@ -import chronicles - -type SubscriptionObserver* = ref object of RootObj - -method onSubscribe*( - self: SubscriptionObserver, pubsubTopic: string, contentTopics: seq[string] -) {.base, gcsafe, raises: [].} = - error "onSubscribe not implemented" - -method onUnsubscribe*( - self: SubscriptionObserver, pubsubTopic: string, contentTopics: seq[string] -) {.base, gcsafe, raises: [].} = - error "onUnsubscribe not implemented" diff --git a/waku/node/delivery_service/delivery_service.nim b/waku/node/delivery_service/delivery_service.nim new file mode 100644 index 000000000..258c01e95 --- /dev/null +++ b/waku/node/delivery_service/delivery_service.nim @@ -0,0 +1,48 @@ +## This module helps to ensure the correct transmission and reception of messages + +import results +import chronos +import + ./recv_service, + ./send_service, + ./subscription_manager, + waku/[ + waku_core, + waku_node, + waku_store/client, + waku_relay/protocol, + waku_lightpush/client, + waku_filter_v2/client, + ] + +type DeliveryService* = ref object + sendService*: SendService + recvService: RecvService + subscriptionManager*: SubscriptionManager + +proc new*( + T: type DeliveryService, useP2PReliability: 
bool, w: WakuNode +): Result[T, string] = + ## storeClient is needed to give store visitility to DeliveryService + ## wakuRelay and wakuLightpushClient are needed to give a mechanism to SendService to re-publish + let subscriptionManager = SubscriptionManager.new(w) + let sendService = ?SendService.new(useP2PReliability, w, subscriptionManager) + let recvService = RecvService.new(w, subscriptionManager) + + return ok( + DeliveryService( + sendService: sendService, + recvService: recvService, + subscriptionManager: subscriptionManager, + ) + ) + +proc startDeliveryService*(self: DeliveryService) = + self.subscriptionManager.startSubscriptionManager() + self.recvService.startRecvService() + self.sendService.startSendService() + +proc stopDeliveryService*(self: DeliveryService) {.async.} = + await self.sendService.stopSendService() + await self.recvService.stopRecvService() + await self.subscriptionManager.stopSubscriptionManager() diff --git a/waku/node/delivery_monitor/not_delivered_storage/migrations.nim b/waku/node/delivery_service/not_delivered_storage/migrations.nim similarity index 95% rename from waku/node/delivery_monitor/not_delivered_storage/migrations.nim rename to waku/node/delivery_service/not_delivered_storage/migrations.nim index 8175aea62..807074d64 100644 --- a/waku/node/delivery_monitor/not_delivered_storage/migrations.nim +++ b/waku/node/delivery_service/not_delivered_storage/migrations.nim @@ -4,7 +4,7 @@ import std/[tables, strutils, os], results, chronicles import ../../../common/databases/db_sqlite, ../../../common/databases/common logScope: - topics = "waku node delivery_monitor" + topics = "waku node delivery_service" const TargetSchemaVersion* = 1 # increase this when there is an update in the database schema diff --git a/waku/node/delivery_monitor/not_delivered_storage/not_delivered_storage.nim b/waku/node/delivery_service/not_delivered_storage/not_delivered_storage.nim similarity index 93% rename from 
waku/node/delivery_monitor/not_delivered_storage/not_delivered_storage.nim rename to waku/node/delivery_service/not_delivered_storage/not_delivered_storage.nim index 85611310b..b0f5f5828 100644 --- a/waku/node/delivery_monitor/not_delivered_storage/not_delivered_storage.nim +++ b/waku/node/delivery_service/not_delivered_storage/not_delivered_storage.nim @@ -1,17 +1,17 @@ ## This module is aimed to keep track of the sent/published messages that are considered ## not being properly delivered. -## +## ## The archiving of such messages will happen in a local sqlite database. -## +## ## In the very first approach, we consider that a message is sent properly is it has been ## received by any store node. -## +## import results import ../../../common/databases/db_sqlite, ../../../waku_core/message/message, - ../../../node/delivery_monitor/not_delivered_storage/migrations + ../../../node/delivery_service/not_delivered_storage/migrations const NotDeliveredMessagesDbUrl = "not-delivered-messages.db" diff --git a/waku/node/delivery_service/recv_service.nim b/waku/node/delivery_service/recv_service.nim new file mode 100644 index 000000000..c4dcf4fef --- /dev/null +++ b/waku/node/delivery_service/recv_service.nim @@ -0,0 +1,3 @@ +import ./recv_service/recv_service + +export recv_service diff --git a/waku/node/delivery_monitor/recv_monitor.nim b/waku/node/delivery_service/recv_service/recv_service.nim similarity index 55% rename from waku/node/delivery_monitor/recv_monitor.nim rename to waku/node/delivery_service/recv_service/recv_service.nim index 6ea35d301..0eba2c450 100644 --- a/waku/node/delivery_monitor/recv_monitor.nim +++ b/waku/node/delivery_service/recv_service/recv_service.nim @@ -2,15 +2,21 @@ ## receive and is backed by store-v3 requests to get an additional degree of certainty ## -import std/[tables, sequtils, options] +import std/[tables, sequtils, options, sets] import chronos, chronicles, libp2p/utility +import ../[subscription_manager] import - ../../waku_core, - 
./delivery_callback, - ./subscriptions_observer, - ../../waku_store/[client, common], - ../../waku_filter_v2/client, - ../../waku_core/topics + waku/[ + waku_core, + waku_store/client, + waku_store/common, + waku_filter_v2/client, + waku_core/topics, + events/delivery_events, + events/message_events, + waku_node, + common/broker/broker_context, + ] const StoreCheckPeriod = chronos.minutes(5) ## How often to perform store queries @@ -28,14 +34,11 @@ type RecvMessage = object rxTime: Timestamp ## timestamp of the rx message. We will not keep the rx messages forever -type RecvMonitor* = ref object of SubscriptionObserver - topicsInterest: Table[PubsubTopic, seq[ContentTopic]] - ## Tracks message verification requests and when was the last time a - ## pubsub topic was verified for missing messages - ## The key contains pubsub-topics - - storeClient: WakuStoreClient - deliveryCb: DeliveryFeedbackCallback +type RecvService* = ref object of RootObj + brokerCtx: BrokerContext + node: WakuNode + seenMsgListener: MessageSeenEventListener + subscriptionManager: SubscriptionManager recentReceivedMsgs: seq[RecvMessage] @@ -46,10 +49,10 @@ type RecvMonitor* = ref object of SubscriptionObserver endTimeToCheck: Timestamp proc getMissingMsgsFromStore( - self: RecvMonitor, msgHashes: seq[WakuMessageHash] + self: RecvService, msgHashes: seq[WakuMessageHash] ): Future[Result[seq[TupleHashAndMsg], string]] {.async.} = let storeResp: StoreQueryResponse = ( - await self.storeClient.queryToAny( + await self.node.wakuStoreClient.queryToAny( StoreQueryRequest(includeData: true, messageHashes: msgHashes) ) ).valueOr: @@ -62,46 +65,46 @@ proc getMissingMsgsFromStore( ) proc performDeliveryFeedback( - self: RecvMonitor, + self: RecvService, success: DeliverySuccess, dir: DeliveryDirection, comment: string, msgHash: WakuMessageHash, msg: WakuMessage, ) {.gcsafe, raises: [].} = - ## This procs allows to bring delivery feedback to the API client - ## It requires a 'deliveryCb' to be registered 
beforehand. - if self.deliveryCb.isNil(): - error "deliveryCb is nil in performDeliveryFeedback", - success, dir, comment, msg_hash - return - info "recv monitor performDeliveryFeedback", success, dir, comment, msg_hash = shortLog(msgHash) - self.deliveryCb(success, dir, comment, msgHash, msg) -proc msgChecker(self: RecvMonitor) {.async.} = + DeliveryFeedbackEvent.emit( + brokerCtx = self.brokerCtx, + success = success, + dir = dir, + comment = comment, + msgHash = msgHash, + msg = msg, + ) + +proc msgChecker(self: RecvService) {.async.} = ## Continuously checks if a message has been received while true: await sleepAsync(StoreCheckPeriod) - self.endTimeToCheck = getNowInNanosecondTime() var msgHashesInStore = newSeq[WakuMessageHash](0) - for pubsubTopic, cTopics in self.topicsInterest.pairs: + for sub in self.subscriptionManager.getActiveSubscriptions(): let storeResp: StoreQueryResponse = ( - await self.storeClient.queryToAny( + await self.node.wakuStoreClient.queryToAny( StoreQueryRequest( includeData: false, - pubsubTopic: some(PubsubTopic(pubsubTopic)), - contentTopics: cTopics, + pubsubTopic: some(PubsubTopic(sub.pubsubTopic)), + contentTopics: sub.contentTopics, startTime: some(self.startTimeToCheck - DelayExtra.nanos), endTime: some(self.endTimeToCheck + DelayExtra.nanos), ) ) ).valueOr: error "msgChecker failed to get remote msgHashes", - pubsubTopic, cTopics, error = $error + pubsubTopic = sub.pubsubTopic, cTopics = sub.contentTopics, error = $error continue msgHashesInStore.add(storeResp.messages.mapIt(it.messageHash)) @@ -126,71 +129,67 @@ proc msgChecker(self: RecvMonitor) {.async.} = ## update next check times self.startTimeToCheck = self.endTimeToCheck -method onSubscribe( - self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string] -) {.gcsafe, raises: [].} = - info "onSubscribe", pubsubTopic, contentTopics - self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest): - contentTopicsOfInterest[].add(contentTopics) - do: - 
self.topicsInterest[pubsubTopic] = contentTopics +proc processIncomingMessageOfInterest( + self: RecvService, pubsubTopic: string, message: WakuMessage +) = + ## Resolve an incoming network message that was already filtered by topic. + ## Deduplicate (by hash), store (saves in recently-seen messages) and emit + ## the MAPI MessageReceivedEvent for every unique incoming message. -method onUnsubscribe( - self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string] -) {.gcsafe, raises: [].} = - info "onUnsubscribe", pubsubTopic, contentTopics + let msgHash = computeMessageHash(pubsubTopic, message) + if not self.recentReceivedMsgs.anyIt(it.msgHash == msgHash): + let rxMsg = RecvMessage(msgHash: msgHash, rxTime: message.timestamp) + self.recentReceivedMsgs.add(rxMsg) + MessageReceivedEvent.emit(self.brokerCtx, msgHash.to0xHex(), message) - self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest): - let remainingCTopics = - contentTopicsOfInterest[].filterIt(not contentTopics.contains(it)) - contentTopicsOfInterest[] = remainingCTopics - - if remainingCTopics.len == 0: - self.topicsInterest.del(pubsubTopic) - do: - error "onUnsubscribe unsubscribing from wrong topic", pubsubTopic, contentTopics - -proc new*( - T: type RecvMonitor, - storeClient: WakuStoreClient, - wakuFilterClient: WakuFilterClient, -): T = +proc new*(T: typedesc[RecvService], node: WakuNode, s: SubscriptionManager): T = ## The storeClient will help to acquire any possible missed messages let now = getNowInNanosecondTime() - var recvMonitor = RecvMonitor(storeClient: storeClient, startTimeToCheck: now) + var recvService = RecvService( + node: node, + startTimeToCheck: now, + brokerCtx: node.brokerCtx, + subscriptionManager: s, + recentReceivedMsgs: @[], + ) - if not wakuFilterClient.isNil(): - wakuFilterClient.addSubscrObserver(recvMonitor) + # TODO: For MAPI Edge support, either call node.wakuFilterClient.registerPushHandler + # so that the RecvService listens to incoming filter 
messages, + # or have the filter client emit MessageSeenEvent. - let filterPushHandler = proc( - pubsubTopic: PubsubTopic, message: WakuMessage - ) {.async, closure.} = - ## Captures all the messages recived through filter + return recvService - let msgHash = computeMessageHash(pubSubTopic, message) - let rxMsg = RecvMessage(msgHash: msgHash, rxTime: message.timestamp) - recvMonitor.recentReceivedMsgs.add(rxMsg) - - wakuFilterClient.registerPushHandler(filterPushHandler) - - return recvMonitor - -proc loopPruneOldMessages(self: RecvMonitor) {.async.} = +proc loopPruneOldMessages(self: RecvService) {.async.} = while true: let oldestAllowedTime = getNowInNanosecondTime() - MaxMessageLife.nanos self.recentReceivedMsgs.keepItIf(it.rxTime > oldestAllowedTime) await sleepAsync(PruneOldMsgsPeriod) -proc startRecvMonitor*(self: RecvMonitor) = +proc startRecvService*(self: RecvService) = self.msgCheckerHandler = self.msgChecker() self.msgPrunerHandler = self.loopPruneOldMessages() -proc stopRecvMonitor*(self: RecvMonitor) {.async.} = + self.seenMsgListener = MessageSeenEvent.listen( + self.brokerCtx, + proc(event: MessageSeenEvent) {.async: (raises: []).} = + if not self.subscriptionManager.isSubscribed( + event.topic, event.message.contentTopic + ): + trace "skipping message as I am not subscribed", + shard = event.topic, contenttopic = event.message.contentTopic + return + + self.processIncomingMessageOfInterest(event.topic, event.message), + ).valueOr: + error "Failed to set MessageSeenEvent listener", error = error + quit(QuitFailure) + +proc stopRecvService*(self: RecvService) {.async.} = + MessageSeenEvent.dropListener(self.brokerCtx, self.seenMsgListener) if not self.msgCheckerHandler.isNil(): await self.msgCheckerHandler.cancelAndWait() + self.msgCheckerHandler = nil if not self.msgPrunerHandler.isNil(): await self.msgPrunerHandler.cancelAndWait() - -proc setDeliveryCallback*(self: RecvMonitor, deliveryCb: DeliveryFeedbackCallback) = - self.deliveryCb = deliveryCb + 
self.msgPrunerHandler = nil diff --git a/waku/node/delivery_service/send_service.nim b/waku/node/delivery_service/send_service.nim new file mode 100644 index 000000000..de0dbf6a3 --- /dev/null +++ b/waku/node/delivery_service/send_service.nim @@ -0,0 +1,6 @@ +## This module reinforces the publish operation with regular store-v3 requests. +## + +import ./send_service/[send_service, delivery_task] + +export send_service, delivery_task diff --git a/waku/node/delivery_service/send_service/delivery_task.nim b/waku/node/delivery_service/send_service/delivery_task.nim new file mode 100644 index 000000000..0ff151f6e --- /dev/null +++ b/waku/node/delivery_service/send_service/delivery_task.nim @@ -0,0 +1,74 @@ +import std/[options, times], chronos +import waku/waku_core, waku/api/types, waku/requests/node_requests +import waku/common/broker/broker_context + +type DeliveryState* {.pure.} = enum + Entry + SuccessfullyPropagated + # message is known to be sent to the network but not yet validated + SuccessfullyValidated + # message is known to be stored at least on one store node, thus validated + FallbackRetry # retry sending with fallback processor if available + NextRoundRetry # try sending in next loop + FailedToDeliver # final state of failed delivery + +type DeliveryTask* = ref object + requestId*: RequestId + pubsubTopic*: PubsubTopic + msg*: WakuMessage + msgHash*: WakuMessageHash + tryCount*: int + state*: DeliveryState + deliveryTime*: Moment + propagateEventEmitted*: bool + errorDesc*: string + +proc new*( + T: typedesc[DeliveryTask], + requestId: RequestId, + envelop: MessageEnvelope, + brokerCtx: BrokerContext, +): Result[T, string] = + let msg = envelop.toWakuMessage() + # TODO: use sync request for such as soon as available + let relayShardRes = ( + RequestRelayShard.request(brokerCtx, none[PubsubTopic](), envelop.contentTopic) + ).valueOr: + error "RequestRelayShard.request failed", error = error + return err("Failed create DeliveryTask: " & $error) + + let 
pubsubTopic = relayShardRes.relayShard.toPubsubTopic() + let msgHash = computeMessageHash(pubsubTopic, msg) + + return ok( + T( + requestId: requestId, + pubsubTopic: pubsubTopic, + msg: msg, + msgHash: msgHash, + tryCount: 0, + state: DeliveryState.Entry, + ) + ) + +func `==`*(r, l: DeliveryTask): bool = + if r.isNil() == l.isNil(): + r.isNil() or r.msgHash == l.msgHash + else: + false + +proc messageAge*(self: DeliveryTask): timer.Duration = + let actual = getNanosecondTime(getTime().toUnixFloat()) + if self.msg.timestamp >= 0 and self.msg.timestamp < actual: + nanoseconds(actual - self.msg.timestamp) + else: + ZeroDuration + +proc deliveryAge*(self: DeliveryTask): timer.Duration = + if self.state == DeliveryState.SuccessfullyPropagated: + timer.Moment.now() - self.deliveryTime + else: + ZeroDuration + +proc isEphemeral*(self: DeliveryTask): bool = + return self.msg.ephemeral diff --git a/waku/node/delivery_service/send_service/lightpush_processor.nim b/waku/node/delivery_service/send_service/lightpush_processor.nim new file mode 100644 index 000000000..40a754757 --- /dev/null +++ b/waku/node/delivery_service/send_service/lightpush_processor.nim @@ -0,0 +1,81 @@ +import chronicles, chronos, results +import std/options + +import + waku/node/peer_manager, + waku/waku_core, + waku/waku_lightpush/[common, client, rpc], + waku/common/broker/broker_context + +import ./[delivery_task, send_processor] + +logScope: + topics = "send service lightpush processor" + +type LightpushSendProcessor* = ref object of BaseSendProcessor + peerManager: PeerManager + lightpushClient: WakuLightPushClient + +proc new*( + T: typedesc[LightpushSendProcessor], + peerManager: PeerManager, + lightpushClient: WakuLightPushClient, + brokerCtx: BrokerContext, +): T = + return + T(peerManager: peerManager, lightpushClient: lightpushClient, brokerCtx: brokerCtx) + +proc isLightpushPeerAvailable( + self: LightpushSendProcessor, pubsubTopic: PubsubTopic +): bool = + return 
self.peerManager.selectPeer(WakuLightPushCodec, some(pubsubTopic)).isSome() + +method isValidProcessor*( + self: LightpushSendProcessor, task: DeliveryTask +): bool {.gcsafe.} = + return self.isLightpushPeerAvailable(task.pubsubTopic) + +method sendImpl*( + self: LightpushSendProcessor, task: DeliveryTask +): Future[void] {.async.} = + task.tryCount.inc() + info "Trying message delivery via Lightpush", + requestId = task.requestId, + msgHash = task.msgHash.to0xHex(), + tryCount = task.tryCount + + let peer = self.peerManager.selectPeer(WakuLightPushCodec, some(task.pubsubTopic)).valueOr: + debug "No peer available for Lightpush, request pushed back for next round", + requestId = task.requestId + task.state = DeliveryState.NextRoundRetry + return + + let numLightpushServers = ( + await self.lightpushClient.publish(some(task.pubsubTopic), task.msg, peer) + ).valueOr: + error "LightpushSendProcessor.sendImpl failed", error = error.desc.get($error.code) + case error.code + of LightPushErrorCode.NO_PEERS_TO_RELAY, LightPushErrorCode.TOO_MANY_REQUESTS, + LightPushErrorCode.OUT_OF_RLN_PROOF, LightPushErrorCode.SERVICE_NOT_AVAILABLE, + LightPushErrorCode.INTERNAL_SERVER_ERROR: + task.state = DeliveryState.NextRoundRetry + else: + # the message is malformed, send error + task.state = DeliveryState.FailedToDeliver + task.errorDesc = error.desc.get($error.code) + task.deliveryTime = Moment.now() + return + + if numLightpushServers > 0: + info "Message propagated via Lightpush", + requestId = task.requestId, msgHash = task.msgHash.to0xHex() + task.state = DeliveryState.SuccessfullyPropagated + task.deliveryTime = Moment.now() + # TODO: with a simple retry processor it might be more accurate to say `Sent` + else: + # Controversial state, publish says ok but no peer. It should not happen. 
+ debug "Lightpush publish returned zero peers, request pushed back for next round", + requestId = task.requestId + task.state = DeliveryState.NextRoundRetry + + return diff --git a/waku/node/delivery_service/send_service/relay_processor.nim b/waku/node/delivery_service/send_service/relay_processor.nim new file mode 100644 index 000000000..833d15845 --- /dev/null +++ b/waku/node/delivery_service/send_service/relay_processor.nim @@ -0,0 +1,80 @@ +import std/options +import chronos, chronicles +import waku/[waku_core], waku/waku_lightpush/[common, rpc] +import waku/requests/health_requests +import waku/common/broker/broker_context +import waku/api/types +import ./[delivery_task, send_processor] + +logScope: + topics = "send service relay processor" + +type RelaySendProcessor* = ref object of BaseSendProcessor + publishProc: PushMessageHandler + fallbackStateToSet: DeliveryState + +proc new*( + T: typedesc[RelaySendProcessor], + lightpushAvailable: bool, + publishProc: PushMessageHandler, + brokerCtx: BrokerContext, +): RelaySendProcessor = + let fallbackStateToSet = + if lightpushAvailable: + DeliveryState.FallbackRetry + else: + DeliveryState.FailedToDeliver + + return RelaySendProcessor( + publishProc: publishProc, + fallbackStateToSet: fallbackStateToSet, + brokerCtx: brokerCtx, + ) + +proc isTopicHealthy(self: RelaySendProcessor, topic: PubsubTopic): bool {.gcsafe.} = + let healthReport = RequestShardTopicsHealth.request(self.brokerCtx, @[topic]).valueOr: + error "isTopicHealthy: failed to get health report", topic = topic, error = error + return false + + if healthReport.topicHealth.len() < 1: + warn "isTopicHealthy: no topic health entries", topic = topic + return false + let health = healthReport.topicHealth[0].health + debug "isTopicHealthy: topic health is ", topic = topic, health = health + return health == MINIMALLY_HEALTHY or health == SUFFICIENTLY_HEALTHY + +method isValidProcessor*( + self: RelaySendProcessor, task: DeliveryTask +): bool {.gcsafe.} = + 
# Topic health query is not reliable enough after a fresh subscribe... + # return self.isTopicHealthy(task.pubsubTopic) + return true + +method sendImpl*(self: RelaySendProcessor, task: DeliveryTask) {.async.} = + task.tryCount.inc() + info "Trying message delivery via Relay", + requestId = task.requestId, + msgHash = task.msgHash.to0xHex(), + tryCount = task.tryCount + + let noOfPublishedPeers = (await self.publishProc(task.pubsubTopic, task.msg)).valueOr: + let errorMessage = error.desc.get($error.code) + error "Failed to publish message with relay", + request = task.requestId, msgHash = task.msgHash.to0xHex(), error = errorMessage + if error.code != LightPushErrorCode.NO_PEERS_TO_RELAY: + task.state = DeliveryState.FailedToDeliver + task.errorDesc = errorMessage + else: + task.state = self.fallbackStateToSet + return + + if noOfPublishedPeers > 0: + info "Message propagated via Relay", + requestId = task.requestId, + msgHash = task.msgHash.to0xHex(), + noOfPeers = noOfPublishedPeers + task.state = DeliveryState.SuccessfullyPropagated + task.deliveryTime = Moment.now() + else: + # It shall not happen, but still covering it + task.state = self.fallbackStateToSet diff --git a/waku/node/delivery_service/send_service/send_processor.nim b/waku/node/delivery_service/send_service/send_processor.nim new file mode 100644 index 000000000..0108eacd0 --- /dev/null +++ b/waku/node/delivery_service/send_service/send_processor.nim @@ -0,0 +1,36 @@ +import chronos +import ./delivery_task +import waku/common/broker/broker_context + +{.push raises: [].} + +type BaseSendProcessor* = ref object of RootObj + fallbackProcessor*: BaseSendProcessor + brokerCtx*: BrokerContext + +proc chain*(self: BaseSendProcessor, next: BaseSendProcessor) = + self.fallbackProcessor = next + +method isValidProcessor*( + self: BaseSendProcessor, task: DeliveryTask +): bool {.base, gcsafe.} = + return false + +method sendImpl*( + self: BaseSendProcessor, task: DeliveryTask +): Future[void] {.async, base.} 
= + assert false, "Not implemented" + +method process*( + self: BaseSendProcessor, task: DeliveryTask +): Future[void] {.async, base.} = + var currentProcessor: BaseSendProcessor = self + var keepTrying = true + while not currentProcessor.isNil() and keepTrying: + if currentProcessor.isValidProcessor(task): + await currentProcessor.sendImpl(task) + currentProcessor = currentProcessor.fallbackProcessor + keepTrying = task.state == DeliveryState.FallbackRetry + + if task.state == DeliveryState.FallbackRetry: + task.state = DeliveryState.NextRoundRetry diff --git a/waku/node/delivery_service/send_service/send_service.nim b/waku/node/delivery_service/send_service/send_service.nim new file mode 100644 index 000000000..a3c44bc0c --- /dev/null +++ b/waku/node/delivery_service/send_service/send_service.nim @@ -0,0 +1,270 @@ +## This module reinforces the publish operation with regular store-v3 requests. +## + +import std/[sequtils, tables, options] +import chronos, chronicles, libp2p/utility +import + ./[send_processor, relay_processor, lightpush_processor, delivery_task], + ../[subscription_manager], + waku/[ + waku_core, + node/waku_node, + node/peer_manager, + waku_store/client, + waku_store/common, + waku_relay/protocol, + waku_rln_relay/rln_relay, + waku_lightpush/client, + waku_lightpush/callbacks, + events/message_events, + common/broker/broker_context, + ] + +logScope: + topics = "send service" + +# This useful util is missing from sequtils, this extends applyIt with predicate... +template applyItIf*(varSeq, pred, op: untyped) = + for i in low(varSeq) .. high(varSeq): + let it {.inject.} = varSeq[i] + if pred: + op + varSeq[i] = it + +template forEach*(varSeq, op: untyped) = + for i in low(varSeq) .. 
high(varSeq):
    let it {.inject.} = varSeq[i]
    op

const MaxTimeInCache* = chronos.minutes(1)
  ## Messages older than this time will get completely forgotten on publication and a
  ## feedback will be given when that happens

const ServiceLoopInterval* = chronos.seconds(1)
  ## Interval at which we check that messages have been properly received by a store node

const ArchiveTime = chronos.seconds(3)
  ## Estimation of the time we wait until we start confirming that a message has been properly
  ## received and archived by a store node

type SendService* = ref object of RootObj
  brokerCtx: BrokerContext
  taskCache: seq[DeliveryTask]
    ## Cache that contains the delivery task per message hash.
    ## This is needed to make sure the published messages are properly published

  serviceLoopHandle: Future[void] ## handle that allows to stop the async task
  sendProcessor: BaseSendProcessor
    ## head of the chain-of-responsibility used to publish messages

  node: WakuNode
  checkStoreForMessages: bool
    ## when true, propagated messages are additionally confirmed against a store node
  subscriptionManager: SubscriptionManager

proc setupSendProcessorChain(
    peerManager: PeerManager,
    lightpushClient: WakuLightPushClient,
    relay: WakuRelay,
    rlnRelay: WakuRLNRelay,
    brokerCtx: BrokerContext,
): Result[BaseSendProcessor, string] =
  ## Build the send-processor chain (Relay first, Lightpush as fallback).
  ## Returns the head of the chain, or an error when neither transport is available.
  let isRelayAvail = not relay.isNil()
  let isLightPushAvail = not lightpushClient.isNil()

  if not isRelayAvail and not isLightPushAvail:
    return err("No valid send processor found for the delivery task")

  var processors = newSeq[BaseSendProcessor]()

  if isRelayAvail:
    let rln: Option[WakuRLNRelay] =
      if rlnRelay.isNil():
        none[WakuRLNRelay]()
      else:
        some(rlnRelay)
    let publishProc = getRelayPushHandler(relay, rln)

    processors.add(RelaySendProcessor.new(isLightPushAvail, publishProc, brokerCtx))
  if isLightPushAvail:
    processors.add(LightpushSendProcessor.new(peerManager, lightpushClient, brokerCtx))

  # Link each processor to its successor; the head stays processors[0].
  var currentProcessor: BaseSendProcessor = processors[0]
  for i in 1 ..< processors.len:
    currentProcessor.chain(processors[i])
    currentProcessor = processors[i]
    trace "Send processor chain", index = i, processor = type(processors[i]).name

  return ok(processors[0])

proc new*(
    T: typedesc[SendService],
    preferP2PReliability: bool,
    w: WakuNode,
    s: SubscriptionManager,
): Result[T, string] =
  ## Create a SendService for the given node. Requires at least one of
  ## Relay or Lightpush client to be mounted; store-based validation is
  ## enabled only when requested AND a store client is available.
  if w.wakuRelay.isNil() and w.wakuLightpushClient.isNil():
    return err(
      "Could not create SendService. wakuRelay or wakuLightpushClient should be set"
    )

  let checkStoreForMessages = preferP2PReliability and not w.wakuStoreClient.isNil()

  let sendProcessorChain = setupSendProcessorChain(
    w.peerManager, w.wakuLightPushClient, w.wakuRelay, w.wakuRlnRelay, w.brokerCtx
  ).valueOr:
    return err("failed to setup SendProcessorChain: " & $error)

  let sendService = SendService(
    brokerCtx: w.brokerCtx,
    taskCache: newSeq[DeliveryTask](),
    serviceLoopHandle: nil,
    sendProcessor: sendProcessorChain,
    node: w,
    checkStoreForMessages: checkStoreForMessages,
    subscriptionManager: s,
  )

  return ok(sendService)

proc addTask(self: SendService, task: DeliveryTask) =
  ## Track a delivery task; duplicates are ignored.
  self.taskCache.addUnique(task)

proc isStorePeerAvailable*(sendService: SendService): bool =
  ## True when at least one store-capable service peer is known.
  return sendService.node.peerManager.selectPeer(WakuStoreCodec).isSome()

proc checkMsgsInStore(self: SendService, tasksToValidate: seq[DeliveryTask]) {.async.} =
  ## Query a store node for the given tasks' message hashes and update each
  ## task's state: found -> SuccessfullyValidated, missing -> NextRoundRetry.
  if tasksToValidate.len() == 0:
    return

  if not isStorePeerAvailable(self):
    warn "Skipping store validation for ",
      messageCount = tasksToValidate.len(), error = "no store peer available"
    return

  var hashesToValidate = tasksToValidate.mapIt(it.msgHash)
  # TODO: confirm hash format for store query!!!

  let storeResp: StoreQueryResponse = (
    await self.node.wakuStoreClient.queryToAny(
      StoreQueryRequest(includeData: false, messageHashes: hashesToValidate)
    )
  ).valueOr:
    error "Failed to get store validation for messages",
      hashes = hashesToValidate.mapIt(shortLog(it)), error = $error
    return

  let storedItems = storeResp.messages.mapIt(it.messageHash)

  # Set success state for messages found in store
  self.taskCache.applyItIf(storedItems.contains(it.msgHash)):
    it.state = DeliveryState.SuccessfullyValidated

  # set retry state for messages not found in store
  hashesToValidate.keepItIf(not storedItems.contains(it))
  self.taskCache.applyItIf(hashesToValidate.contains(it.msgHash)):
    it.state = DeliveryState.NextRoundRetry

proc checkStoredMessages(self: SendService) {.async.} =
  ## Validate against the store those propagated, non-ephemeral messages that
  ## are old enough (> ArchiveTime) to plausibly have been archived already.
  if not self.checkStoreForMessages:
    return

  let tasksToValidate = self.taskCache.filterIt(
    it.state == DeliveryState.SuccessfullyPropagated and it.deliveryAge() > ArchiveTime and
      not it.isEphemeral()
  )

  await self.checkMsgsInStore(tasksToValidate)

proc reportTaskResult(self: SendService, task: DeliveryTask) =
  ## Emit the event matching the task's terminal state. Intermediate states
  ## emit nothing, but tasks stuck past MaxTimeInCache are failed and reported.
  case task.state
  of DeliveryState.SuccessfullyPropagated:
    # TODO: in case of unable to store-check messages shall we report success instead?
    if not task.propagateEventEmitted:
      info "Message successfully propagated",
        requestId = task.requestId, msgHash = task.msgHash.to0xHex()
      MessagePropagatedEvent.emit(
        self.brokerCtx, task.requestId, task.msgHash.to0xHex()
      )
      task.propagateEventEmitted = true
    return
  of DeliveryState.SuccessfullyValidated:
    info "Message successfully sent",
      requestId = task.requestId, msgHash = task.msgHash.to0xHex()
    MessageSentEvent.emit(self.brokerCtx, task.requestId, task.msgHash.to0xHex())
    return
  of DeliveryState.FailedToDeliver:
    error "Failed to send message",
      requestId = task.requestId,
      msgHash = task.msgHash.to0xHex(),
      error = task.errorDesc
    MessageErrorEvent.emit(
      self.brokerCtx, task.requestId, task.msgHash.to0xHex(), task.errorDesc
    )
    return
  else:
    # rest of the states are intermediate and does not translate to event
    discard

  # Intermediate state: give up once the message has aged out of the cache.
  if task.messageAge() > MaxTimeInCache:
    error "Failed to send message",
      requestId = task.requestId,
      msgHash = task.msgHash.to0xHex(),
      error = "Message too old",
      age = task.messageAge()
    task.state = DeliveryState.FailedToDeliver
    MessageErrorEvent.emit(
      self.brokerCtx,
      task.requestId,
      task.msgHash.to0xHex(),
      "Unable to send within retry time window",
    )

proc evaluateAndCleanUp(self: SendService) =
  ## Report every task's state, then drop finished tasks from the cache.
  self.taskCache.forEach(self.reportTaskResult(it))
  # Single pass instead of two keepItIf sweeps: drop validated and failed
  # tasks, and also propagated ephemeral messages (no store check possible).
  self.taskCache.keepItIf(
    it.state != DeliveryState.SuccessfullyValidated and
      it.state != DeliveryState.FailedToDeliver and
      not (it.isEphemeral() and it.state == DeliveryState.SuccessfullyPropagated)
  )

proc trySendMessages(self: SendService) {.async.} =
  ## Push every task awaiting a retry through the send-processor chain.
  let tasksToSend = self.taskCache.filterIt(it.state == DeliveryState.NextRoundRetry)

  for task in tasksToSend:
    # TODO: check if it has any perf gain to run them concurrently
    await self.sendProcessor.process(task)

proc serviceLoop(self: SendService) {.async.} =
  ## Continuously monitors that the sent messages have been received by a store node
  while true:
    await self.trySendMessages()
    await self.checkStoredMessages()
    self.evaluateAndCleanUp()
    ## TODO: add circuit breaker to avoid infinite looping in case of persistent failures
    ## Use OnlineStateChange observers to pause/resume the loop
    await sleepAsync(ServiceLoopInterval)

proc startSendService*(self: SendService) =
  ## Launch the background service loop.
  self.serviceLoopHandle = self.serviceLoop()

proc stopSendService*(self: SendService) {.async.} =
  ## Cancel the background service loop, if running.
  if not self.serviceLoopHandle.isNil():
    await self.serviceLoopHandle.cancelAndWait()

proc send*(self: SendService, task: DeliveryTask) {.async.} =
  ## Subscribe to the message's content topic, push the task through the send
  ## chain once, report the outcome, and keep non-failed tasks for follow-up.
  assert(not task.isNil(), "task for send must not be nil")

  info "SendService.send: processing delivery task",
    requestId = task.requestId, msgHash = task.msgHash.to0xHex()

  self.subscriptionManager.subscribe(task.msg.contentTopic).isOkOr:
    error "SendService.send: failed to subscribe to content topic",
      contentTopic = task.msg.contentTopic, error = error

  await self.sendProcessor.process(task)
  reportTaskResult(self, task)
  if task.state != DeliveryState.FailedToDeliver:
    self.addTask(task)
diff --git a/waku/node/delivery_service/subscription_manager.nim b/waku/node/delivery_service/subscription_manager.nim
new file mode 100644
index 000000000..22df47413
--- /dev/null
+++ b/waku/node/delivery_service/subscription_manager.nim
@@ -0,0 +1,164 @@
import std/[sets, tables, options, strutils], chronos, chronicles, results
import
  waku/[
    waku_core,
    waku_core/topics,
    waku_core/topics/sharding,
    waku_node,
    waku_relay,
    common/broker/broker_context,
    events/delivery_events,
  ]

type SubscriptionManager* = ref object of RootObj
  node: WakuNode
  contentTopicSubs: Table[PubsubTopic, HashSet[ContentTopic]]
    ## Map of Shard to ContentTopic needed because e.g.
WakuRelay is PubsubTopic only.
    ## A present key with an empty HashSet value means pubsubtopic already subscribed
    ## (via subscribePubsubTopics()) but there's no specific content topic interest yet.

proc new*(T: typedesc[SubscriptionManager], node: WakuNode): T =
  ## Create a SubscriptionManager bound to `node`, with no subscriptions yet.
  SubscriptionManager(
    node: node, contentTopicSubs: initTable[PubsubTopic, HashSet[ContentTopic]]()
  )

proc addContentTopicInterest(
    self: SubscriptionManager, shard: PubsubTopic, topic: ContentTopic
): Result[void, string] =
  ## Register interest in `topic` under `shard`, creating the shard entry on demand.
  # mgetOrPut replaces the hasKey-then-withValue double lookup with a single one;
  # incl() is a no-op when the topic is already tracked.
  self.contentTopicSubs.mgetOrPut(shard, initHashSet[ContentTopic]()).incl(topic)

  # TODO: Call a "subscribe(shard, topic)" on filter client here,
  # so the filter client can know that subscriptions changed.

  return ok()

proc removeContentTopicInterest(
    self: SubscriptionManager, shard: PubsubTopic, topic: ContentTopic
): Result[void, string] =
  ## Drop interest in `topic` under `shard`. When no interests remain and no
  ## Relay is mounted, the shard entry itself is removed; with Relay the (empty)
  ## entry stays because the pubsub subscription is still active.
  self.contentTopicSubs.withValue(shard, cTopics):
    cTopics[].excl(topic) # excl is a no-op when the topic was not tracked

    if cTopics[].len == 0 and isNil(self.node.wakuRelay):
      self.contentTopicSubs.del(shard) # We're done with cTopics here

  # TODO: Call a "unsubscribe(shard, topic)" on filter client here,
  # so the filter client can know that subscriptions changed.

  return ok()

proc subscribePubsubTopics(
    self: SubscriptionManager, shards: seq[PubsubTopic]
): Result[void, string] =
  ## Subscribe the Relay to every shard not yet tracked. Per-shard failures are
  ## collected and reported together instead of aborting on the first one.
  if isNil(self.node.wakuRelay):
    return err("subscribePubsubTopics requires a Relay")

  var errors: seq[string] = @[]

  for shard in shards:
    if self.contentTopicSubs.hasKey(shard):
      continue # already subscribed, possibly with content-topic interests

    self.node.subscribe((kind: PubsubSub, topic: shard), nil).isOkOr:
      errors.add("shard " & shard & ": " & error)
      continue

    # Track the shard with no content-topic interest yet.
    self.contentTopicSubs[shard] = initHashSet[ContentTopic]()

  if errors.len > 0:
    # NOTE: error label fixed to match this proc's name (was "subscribeShard").
    return err("subscribePubsubTopics errors: " & errors.join("; "))

  return ok()

proc startSubscriptionManager*(self: SubscriptionManager) =
  ## With Relay and autosharding available, subscribe the Relay to all
  ## generation-zero shards of the cluster; otherwise do nothing.
  if isNil(self.node.wakuRelay):
    return

  if self.node.wakuAutoSharding.isNone():
    info "SubscriptionManager has no AutoSharding configured; skipping auto-subscribe."
    return

  # Subscribe relay to all shards in autosharding.
  let autoSharding = self.node.wakuAutoSharding.get()
  let clusterId = autoSharding.clusterId
  let numShards = autoSharding.shardCountGenZero

  if numShards > 0:
    var clusterPubsubTopics = newSeqOfCap[PubsubTopic](numShards)

    for i in 0 ..< numShards:
      let shardObj = RelayShard(clusterId: clusterId, shardId: uint16(i))
      clusterPubsubTopics.add(PubsubTopic($shardObj))

    self.subscribePubsubTopics(clusterPubsubTopics).isOkOr:
      error "Failed to auto-subscribe Relay to cluster shards: ", error = error
proc stopSubscriptionManager*(self: SubscriptionManager) {.async.} =
  ## Nothing to tear down yet; kept for lifecycle symmetry with start.
  discard

proc getActiveSubscriptions*(
    self: SubscriptionManager
): seq[tuple[pubsubTopic: string, contentTopics: seq[ContentTopic]]] =
  ## Snapshot of every shard that has at least one content-topic interest.
  var activeSubs: seq[tuple[pubsubTopic: string, contentTopics: seq[ContentTopic]]] =
    @[]

  for pubsub, cTopicSet in self.contentTopicSubs.pairs:
    if cTopicSet.len > 0:
      var cTopicSeq = newSeqOfCap[ContentTopic](cTopicSet.len)
      for t in cTopicSet:
        cTopicSeq.add(t)
      activeSubs.add((pubsub, cTopicSeq))

  return activeSubs

proc getShardForContentTopic(
    self: SubscriptionManager, topic: ContentTopic
): Result[PubsubTopic, string] =
  ## Resolve the pubsub shard for `topic` via autosharding; errors without it.
  if self.node.wakuAutoSharding.isSome():
    let shardObj = ?self.node.wakuAutoSharding.get().getShard(topic)
    return ok($shardObj)

  return err("SubscriptionManager requires AutoSharding")

proc isSubscribed*(
    self: SubscriptionManager, shard: PubsubTopic, contentTopic: ContentTopic
): bool {.raises: [].} =
  ## Whether `contentTopic` is tracked under the given `shard`.
  self.contentTopicSubs.withValue(shard, cTopics):
    return cTopics[].contains(contentTopic)
  return false

proc isSubscribed*(
    self: SubscriptionManager, topic: ContentTopic
): Result[bool, string] =
  ## Whether `topic` is tracked under its (autosharded) shard.
  let shard = ?self.getShardForContentTopic(topic)
  # Delegate to the shard-based overload: one table lookup instead of the
  # former hasKey + [] double lookup, and no duplicated membership logic.
  return ok(self.isSubscribed(shard, topic))

proc subscribe*(self: SubscriptionManager, topic: ContentTopic): Result[void, string] =
  ## Register interest in `topic`; with Relay mounted, also subscribes the
  ## underlying shard on first use. Requires Relay or a Filter client.
  if isNil(self.node.wakuRelay) and isNil(self.node.wakuFilterClient):
    return err("SubscriptionManager requires either Relay or Filter Client.")

  let shard = ?self.getShardForContentTopic(topic)

  if not isNil(self.node.wakuRelay) and not self.contentTopicSubs.hasKey(shard):
    ?self.subscribePubsubTopics(@[shard])

  ?self.addContentTopicInterest(shard, topic)

  return ok()

proc unsubscribe*(
    self: SubscriptionManager, topic: ContentTopic
): Result[void, string] =
  if isNil(self.node.wakuRelay) and isNil(self.node.wakuFilterClient):
    return
err("SubscriptionManager requires either Relay or Filter Client.") + + let shard = ?self.getShardForContentTopic(topic) + + if self.isSubscribed(shard, topic): + ?self.removeContentTopicInterest(shard, topic) + + return ok() diff --git a/waku/node/health_monitor.nim b/waku/node/health_monitor.nim index 854a8bbc0..6e42352d4 100644 --- a/waku/node/health_monitor.nim +++ b/waku/node/health_monitor.nim @@ -1,4 +1,9 @@ import - health_monitor/[node_health_monitor, protocol_health, online_monitor, health_status] + health_monitor/[ + node_health_monitor, protocol_health, online_monitor, health_status, + connection_status, health_report, + ] -export node_health_monitor, protocol_health, online_monitor, health_status +export + node_health_monitor, protocol_health, online_monitor, health_status, + connection_status, health_report diff --git a/waku/node/health_monitor/connection_status.nim b/waku/node/health_monitor/connection_status.nim new file mode 100644 index 000000000..77696130a --- /dev/null +++ b/waku/node/health_monitor/connection_status.nim @@ -0,0 +1,15 @@ +import chronos, results, std/strutils, ../../api/types + +export ConnectionStatus + +proc init*( + t: typedesc[ConnectionStatus], strRep: string +): Result[ConnectionStatus, string] = + try: + let status = parseEnum[ConnectionStatus](strRep) + return ok(status) + except ValueError: + return err("Invalid ConnectionStatus string representation: " & strRep) + +type ConnectionStatusChangeHandler* = + proc(status: ConnectionStatus): Future[void] {.gcsafe, raises: [Defect].} diff --git a/waku/node/health_monitor/health_report.nim b/waku/node/health_monitor/health_report.nim new file mode 100644 index 000000000..d6c23cd28 --- /dev/null +++ b/waku/node/health_monitor/health_report.nim @@ -0,0 +1,10 @@ +{.push raises: [].} + +import ./health_status, ./connection_status, ./protocol_health + +type HealthReport* = object + ## Rest API type returned for /health endpoint + ## + nodeHealth*: HealthStatus # legacy "READY" 
health indicator + connectionStatus*: ConnectionStatus # new "Connected" health indicator + protocolsHealth*: seq[ProtocolHealth] diff --git a/waku/node/health_monitor/node_health_monitor.nim b/waku/node/health_monitor/node_health_monitor.nim index eb5d0ed8c..ddba47ccb 100644 --- a/waku/node/health_monitor/node_health_monitor.nim +++ b/waku/node/health_monitor/node_health_monitor.nim @@ -1,65 +1,89 @@ {.push raises: [].} import - std/[options, sets, random, sequtils], + std/[options, sets, random, sequtils, json, strutils, tables], chronos, chronicles, - libp2p/protocols/rendezvous - -import - ../waku_node, - ../kernel_api, - ../../waku_rln_relay, - ../../waku_relay, - ../peer_manager, - ./online_monitor, - ./health_status, - ./protocol_health + libp2p/protocols/rendezvous, + libp2p/protocols/pubsub, + libp2p/protocols/pubsub/rpc/messages, + waku/[ + waku_relay, + waku_rln_relay, + api/types, + events/health_events, + events/peer_events, + node/waku_node, + node/peer_manager, + node/kernel_api, + node/health_monitor/online_monitor, + node/health_monitor/health_status, + node/health_monitor/health_report, + node/health_monitor/connection_status, + node/health_monitor/protocol_health, + ] ## This module is aimed to check the state of the "self" Waku Node # randomize initializes sdt/random's random number generator # if not called, the outcome of randomization procedures will be the same in every run -randomize() +random.randomize() -type - HealthReport* = object - nodeHealth*: HealthStatus - protocolsHealth*: seq[ProtocolHealth] +const HealthyThreshold* = 2 + ## minimum peers required for all services for a Connected status, excluding Relay - NodeHealthMonitor* = ref object - nodeHealth: HealthStatus - node: WakuNode - onlineMonitor*: OnlineMonitor - keepAliveFut: Future[void] +type NodeHealthMonitor* = ref object + nodeHealth: HealthStatus + node: WakuNode + onlineMonitor*: OnlineMonitor + keepAliveFut: Future[void] + healthLoopFut: Future[void] + healthUpdateEvent: 
AsyncEvent + connectionStatus: ConnectionStatus + onConnectionStatusChange*: ConnectionStatusChangeHandler + cachedProtocols: seq[ProtocolHealth] + ## state of each protocol to report. + ## calculated on last event that can change any protocol's state so fetching a report is fast. + strength: Table[WakuProtocol, int] + ## latest known connectivity strength (e.g. connected peer count) metric for each protocol. + ## if it doesn't make sense for the protocol in question, this is set to zero. + relayObserver: PubSubObserver + peerEventListener: EventWakuPeerListener -template checkWakuNodeNotNil(node: WakuNode, p: ProtocolHealth): untyped = - if node.isNil(): - warn "WakuNode is not set, cannot check health", protocol_health_instance = $p - return p.notMounted() +func getHealth*(report: HealthReport, kind: WakuProtocol): ProtocolHealth = + for h in report.protocolsHealth: + if h.protocol == $kind: + return h + # Shouldn't happen, but if it does, then assume protocol is not mounted + return ProtocolHealth.init(kind) + +proc countCapablePeers(hm: NodeHealthMonitor, codec: string): int = + if isNil(hm.node.peerManager): + return 0 + + return hm.node.peerManager.getCapablePeersCount(codec) proc getRelayHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Relay") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.RelayProtocol) - if hm.node.wakuRelay == nil: + if isNil(hm.node.wakuRelay): + hm.strength[WakuProtocol.RelayProtocol] = 0 return p.notMounted() let relayPeers = hm.node.wakuRelay.getConnectedPubSubPeers(pubsubTopic = "").valueOr: + hm.strength[WakuProtocol.RelayProtocol] = 0 return p.notMounted() - if relayPeers.len() == 0: + let count = relayPeers.len + hm.strength[WakuProtocol.RelayProtocol] = count + if count == 0: return p.notReady("No connected peers") return p.ready() proc getRlnRelayHealth(hm: NodeHealthMonitor): Future[ProtocolHealth] {.async.} = - var p = ProtocolHealth.init("Rln Relay") - if 
hm.node.isNil(): - warn "WakuNode is not set, cannot check health", protocol_health_instance = $p - return p.notMounted() - - if hm.node.wakuRlnRelay.isNil(): + var p = ProtocolHealth.init(WakuProtocol.RlnRelayProtocol) + if isNil(hm.node.wakuRlnRelay): return p.notMounted() const FutIsReadyTimout = 5.seconds @@ -82,131 +106,144 @@ proc getRlnRelayHealth(hm: NodeHealthMonitor): Future[ProtocolHealth] {.async.} proc getLightpushHealth( hm: NodeHealthMonitor, relayHealth: HealthStatus ): ProtocolHealth = - var p = ProtocolHealth.init("Lightpush") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.LightpushProtocol) - if hm.node.wakuLightPush == nil: + if isNil(hm.node.wakuLightPush): + hm.strength[WakuProtocol.LightpushProtocol] = 0 return p.notMounted() + let peerCount = countCapablePeers(hm, WakuLightPushCodec) + hm.strength[WakuProtocol.LightpushProtocol] = peerCount + if relayHealth == HealthStatus.READY: return p.ready() return p.notReady("Node has no relay peers to fullfill push requests") -proc getLightpushClientHealth( - hm: NodeHealthMonitor, relayHealth: HealthStatus -): ProtocolHealth = - var p = ProtocolHealth.init("Lightpush Client") - checkWakuNodeNotNil(hm.node, p) - - if hm.node.wakuLightpushClient == nil: - return p.notMounted() - - let selfServiceAvailable = - hm.node.wakuLightPush != nil and relayHealth == HealthStatus.READY - let servicePeerAvailable = hm.node.peerManager.selectPeer(WakuLightPushCodec).isSome() - - if selfServiceAvailable or servicePeerAvailable: - return p.ready() - - return p.notReady("No Lightpush service peer available yet") - proc getLegacyLightpushHealth( hm: NodeHealthMonitor, relayHealth: HealthStatus ): ProtocolHealth = - var p = ProtocolHealth.init("Legacy Lightpush") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.LegacyLightpushProtocol) - if hm.node.wakuLegacyLightPush == nil: + if isNil(hm.node.wakuLegacyLightPush): + 
hm.strength[WakuProtocol.LegacyLightpushProtocol] = 0 return p.notMounted() + let peerCount = countCapablePeers(hm, WakuLegacyLightPushCodec) + hm.strength[WakuProtocol.LegacyLightpushProtocol] = peerCount + if relayHealth == HealthStatus.READY: return p.ready() return p.notReady("Node has no relay peers to fullfill push requests") -proc getLegacyLightpushClientHealth( - hm: NodeHealthMonitor, relayHealth: HealthStatus -): ProtocolHealth = - var p = ProtocolHealth.init("Legacy Lightpush Client") - checkWakuNodeNotNil(hm.node, p) - - if hm.node.wakuLegacyLightpushClient == nil: - return p.notMounted() - - if (hm.node.wakuLegacyLightPush != nil and relayHealth == HealthStatus.READY) or - hm.node.peerManager.selectPeer(WakuLegacyLightPushCodec).isSome(): - return p.ready() - - return p.notReady("No Lightpush service peer available yet") - proc getFilterHealth(hm: NodeHealthMonitor, relayHealth: HealthStatus): ProtocolHealth = - var p = ProtocolHealth.init("Filter") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.FilterProtocol) - if hm.node.wakuFilter == nil: + if isNil(hm.node.wakuFilter): + hm.strength[WakuProtocol.FilterProtocol] = 0 return p.notMounted() + let peerCount = countCapablePeers(hm, WakuFilterSubscribeCodec) + hm.strength[WakuProtocol.FilterProtocol] = peerCount + if relayHealth == HealthStatus.READY: return p.ready() return p.notReady("Relay is not ready, filter will not be able to sort out messages") -proc getFilterClientHealth( - hm: NodeHealthMonitor, relayHealth: HealthStatus -): ProtocolHealth = - var p = ProtocolHealth.init("Filter Client") - checkWakuNodeNotNil(hm.node, p) - - if hm.node.wakuFilterClient == nil: - return p.notMounted() - - if hm.node.peerManager.selectPeer(WakuFilterSubscribeCodec).isSome(): - return p.ready() - - return p.notReady("No Filter service peer available yet") - proc getStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Store") - 
checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.StoreProtocol) - if hm.node.wakuStore == nil: + if isNil(hm.node.wakuStore): + hm.strength[WakuProtocol.StoreProtocol] = 0 return p.notMounted() + let peerCount = countCapablePeers(hm, WakuStoreCodec) + hm.strength[WakuProtocol.StoreProtocol] = peerCount return p.ready() -proc getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Store Client") - checkWakuNodeNotNil(hm.node, p) +proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init(WakuProtocol.LegacyStoreProtocol) - if hm.node.wakuStoreClient == nil: + if isNil(hm.node.wakuLegacyStore): + hm.strength[WakuProtocol.LegacyStoreProtocol] = 0 return p.notMounted() - if hm.node.peerManager.selectPeer(WakuStoreCodec).isSome() or hm.node.wakuStore != nil: + let peerCount = hm.countCapablePeers(WakuLegacyStoreCodec) + hm.strength[WakuProtocol.LegacyStoreProtocol] = peerCount + return p.ready() + +proc getLightpushClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init(WakuProtocol.LightpushClientProtocol) + + if isNil(hm.node.wakuLightpushClient): + hm.strength[WakuProtocol.LightpushClientProtocol] = 0 + return p.notMounted() + + let peerCount = countCapablePeers(hm, WakuLightPushCodec) + hm.strength[WakuProtocol.LightpushClientProtocol] = peerCount + + if peerCount > 0: + return p.ready() + return p.notReady("No Lightpush service peer available yet") + +proc getLegacyLightpushClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init(WakuProtocol.LegacyLightpushClientProtocol) + + if isNil(hm.node.wakuLegacyLightpushClient): + hm.strength[WakuProtocol.LegacyLightpushClientProtocol] = 0 + return p.notMounted() + + let peerCount = countCapablePeers(hm, WakuLegacyLightPushCodec) + hm.strength[WakuProtocol.LegacyLightpushClientProtocol] = peerCount + + if peerCount > 0: + return p.ready() + return p.notReady("No 
Lightpush service peer available yet") + +proc getFilterClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init(WakuProtocol.FilterClientProtocol) + + if isNil(hm.node.wakuFilterClient): + hm.strength[WakuProtocol.FilterClientProtocol] = 0 + return p.notMounted() + + let peerCount = countCapablePeers(hm, WakuFilterSubscribeCodec) + hm.strength[WakuProtocol.FilterClientProtocol] = peerCount + + if peerCount > 0: + return p.ready() + return p.notReady("No Filter service peer available yet") + +proc getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init(WakuProtocol.StoreClientProtocol) + + if isNil(hm.node.wakuStoreClient): + hm.strength[WakuProtocol.StoreClientProtocol] = 0 + return p.notMounted() + + let peerCount = countCapablePeers(hm, WakuStoreCodec) + hm.strength[WakuProtocol.StoreClientProtocol] = peerCount + + if peerCount > 0 or not isNil(hm.node.wakuStore): return p.ready() return p.notReady( "No Store service peer available yet, neither Store service set up for the node" ) -proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Legacy Store") - checkWakuNodeNotNil(hm.node, p) - - if hm.node.wakuLegacyStore == nil: - return p.notMounted() - - return p.ready() - proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Legacy Store Client") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.LegacyStoreClientProtocol) - if hm.node.wakuLegacyStoreClient == nil: + if isNil(hm.node.wakuLegacyStoreClient): + hm.strength[WakuProtocol.LegacyStoreClientProtocol] = 0 return p.notMounted() - if hm.node.peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or - hm.node.wakuLegacyStore != nil: + let peerCount = countCapablePeers(hm, WakuLegacyStoreCodec) + hm.strength[WakuProtocol.LegacyStoreClientProtocol] = peerCount + + if peerCount > 0 or not isNil(hm.node.wakuLegacyStore): return 
p.ready() return p.notReady( @@ -214,41 +251,303 @@ proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = ) proc getPeerExchangeHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Peer Exchange") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.PeerExchangeProtocol) - if hm.node.wakuPeerExchange == nil: + if isNil(hm.node.wakuPeerExchange): + hm.strength[WakuProtocol.PeerExchangeProtocol] = 0 return p.notMounted() + let peerCount = countCapablePeers(hm, WakuPeerExchangeCodec) + hm.strength[WakuProtocol.PeerExchangeProtocol] = peerCount + return p.ready() proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Rendezvous") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.RendezvousProtocol) - if hm.node.wakuRendezvous == nil: + if isNil(hm.node.wakuRendezvous): + hm.strength[WakuProtocol.RendezvousProtocol] = 0 return p.notMounted() - if hm.node.peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0: + let peerCount = countCapablePeers(hm, RendezVousCodec) + hm.strength[WakuProtocol.RendezvousProtocol] = peerCount + if peerCount == 0: return p.notReady("No Rendezvous peers are available yet") return p.ready() proc getMixHealth(hm: NodeHealthMonitor): ProtocolHealth = - var p = ProtocolHealth.init("Mix") - checkWakuNodeNotNil(hm.node, p) + var p = ProtocolHealth.init(WakuProtocol.MixProtocol) - if hm.node.wakuMix.isNil(): + if isNil(hm.node.wakuMix): return p.notMounted() return p.ready() +proc getSyncProtocolHealthInfo*( + hm: NodeHealthMonitor, protocol: WakuProtocol +): ProtocolHealth = + ## Get ProtocolHealth for a given protocol that can provide it synchronously + ## + case protocol + of WakuProtocol.RelayProtocol: + return hm.getRelayHealth() + of WakuProtocol.StoreProtocol: + return hm.getStoreHealth() + of WakuProtocol.LegacyStoreProtocol: + return hm.getLegacyStoreHealth() + of WakuProtocol.FilterProtocol: + 
return hm.getFilterHealth(hm.getRelayHealth().health) + of WakuProtocol.LightpushProtocol: + return hm.getLightpushHealth(hm.getRelayHealth().health) + of WakuProtocol.LegacyLightpushProtocol: + return hm.getLegacyLightpushHealth(hm.getRelayHealth().health) + of WakuProtocol.PeerExchangeProtocol: + return hm.getPeerExchangeHealth() + of WakuProtocol.RendezvousProtocol: + return hm.getRendezvousHealth() + of WakuProtocol.MixProtocol: + return hm.getMixHealth() + of WakuProtocol.StoreClientProtocol: + return hm.getStoreClientHealth() + of WakuProtocol.LegacyStoreClientProtocol: + return hm.getLegacyStoreClientHealth() + of WakuProtocol.FilterClientProtocol: + return hm.getFilterClientHealth() + of WakuProtocol.LightpushClientProtocol: + return hm.getLightpushClientHealth() + of WakuProtocol.LegacyLightpushClientProtocol: + return hm.getLegacyLightpushClientHealth() + of WakuProtocol.RlnRelayProtocol: + # Could waitFor here but we don't want to block the main thread. + # Could also return a cached value from a previous check. 
+ var p = ProtocolHealth.init(protocol) + return p.notReady("RLN Relay health check is async") + else: + var p = ProtocolHealth.init(protocol) + return p.notMounted() + +proc getProtocolHealthInfo*( + hm: NodeHealthMonitor, protocol: WakuProtocol +): Future[ProtocolHealth] {.async.} = + ## Get ProtocolHealth for a given protocol + ## + case protocol + of WakuProtocol.RlnRelayProtocol: + return await hm.getRlnRelayHealth() + else: + return hm.getSyncProtocolHealthInfo(protocol) + +proc getSyncAllProtocolHealthInfo(hm: NodeHealthMonitor): seq[ProtocolHealth] = + ## Get ProtocolHealth for the subset of protocols that can provide it synchronously + ## + var protocols: seq[ProtocolHealth] = @[] + let relayHealth = hm.getRelayHealth() + protocols.add(relayHealth) + + protocols.add(hm.getLightpushHealth(relayHealth.health)) + protocols.add(hm.getLegacyLightpushHealth(relayHealth.health)) + protocols.add(hm.getFilterHealth(relayHealth.health)) + protocols.add(hm.getStoreHealth()) + protocols.add(hm.getLegacyStoreHealth()) + protocols.add(hm.getPeerExchangeHealth()) + protocols.add(hm.getRendezvousHealth()) + protocols.add(hm.getMixHealth()) + + protocols.add(hm.getLightpushClientHealth()) + protocols.add(hm.getLegacyLightpushClientHealth()) + protocols.add(hm.getStoreClientHealth()) + protocols.add(hm.getLegacyStoreClientHealth()) + protocols.add(hm.getFilterClientHealth()) + return protocols + +proc getAllProtocolHealthInfo( + hm: NodeHealthMonitor +): Future[seq[ProtocolHealth]] {.async.} = + ## Get ProtocolHealth for all protocols + ## + var protocols = hm.getSyncAllProtocolHealthInfo() + + let rlnHealth = await hm.getRlnRelayHealth() + protocols.add(rlnHealth) + + return protocols + +proc calculateConnectionState*( + protocols: seq[ProtocolHealth], + strength: Table[WakuProtocol, int], ## latest connectivity strength (e.g. 
peer count) for a protocol + dLowOpt: Option[int], ## minimum relay peers for Connected status if in Core (Relay) mode +): ConnectionStatus = + var + relayCount = 0 + lightpushCount = 0 + filterCount = 0 + storeClientCount = 0 + + for p in protocols: + let kind = + try: + parseEnum[WakuProtocol](p.protocol) + except ValueError: + continue + + if p.health != HealthStatus.READY: + continue + + let strength = strength.getOrDefault(kind, 0) + + if kind in RelayProtocols: + relayCount = max(relayCount, strength) + elif kind in StoreClientProtocols: + storeClientCount = max(storeClientCount, strength) + elif kind in LightpushClientProtocols: + lightpushCount = max(lightpushCount, strength) + elif kind in FilterClientProtocols: + filterCount = max(filterCount, strength) + + debug "calculateConnectionState", + relayCount, storeClientCount, lightpushCount, filterCount + + # Relay connectivity should be a sufficient check in Core mode. + # "Store peers" are relay peers because incoming messages in + # the relay are input to the store server. + # But if Store server (or client, even) is not mounted as well, this logic assumes + # the user knows what they're doing. + + if dLowOpt.isSome(): + if relayCount >= dLowOpt.get(): + return ConnectionStatus.Connected + + if relayCount > 0: + return ConnectionStatus.PartiallyConnected + + # No relay connectivity. Relay might not be mounted, or may just have zero peers. + # Fall back to Edge check in any case to be sure. 
+ + let canSend = lightpushCount > 0 + let canReceive = filterCount > 0 + let canStore = storeClientCount > 0 + + let meetsMinimum = canSend and canReceive and canStore + + if not meetsMinimum: + return ConnectionStatus.Disconnected + + let isEdgeRobust = + (lightpushCount >= HealthyThreshold) and (filterCount >= HealthyThreshold) and + (storeClientCount >= HealthyThreshold) + + if isEdgeRobust: + return ConnectionStatus.Connected + + return ConnectionStatus.PartiallyConnected + +proc calculateConnectionState*(hm: NodeHealthMonitor): ConnectionStatus = + let dLow = + if isNil(hm.node.wakuRelay): + none(int) + else: + some(hm.node.wakuRelay.parameters.dLow) + return calculateConnectionState(hm.cachedProtocols, hm.strength, dLow) + +proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} = + ## Get a HealthReport that includes all protocols + ## + var report: HealthReport + + if hm.nodeHealth == HealthStatus.INITIALIZING or + hm.nodeHealth == HealthStatus.SHUTTING_DOWN: + report.nodeHealth = hm.nodeHealth + report.connectionStatus = ConnectionStatus.Disconnected + return report + + if hm.cachedProtocols.len == 0: + hm.cachedProtocols = await hm.getAllProtocolHealthInfo() + hm.connectionStatus = hm.calculateConnectionState() + + report.nodeHealth = HealthStatus.READY + report.connectionStatus = hm.connectionStatus + report.protocolsHealth = hm.cachedProtocols + return report + +proc getSyncNodeHealthReport*(hm: NodeHealthMonitor): HealthReport = + ## Get a HealthReport that includes the subset of protocols that inform health synchronously + ## + var report: HealthReport + + if hm.nodeHealth == HealthStatus.INITIALIZING or + hm.nodeHealth == HealthStatus.SHUTTING_DOWN: + report.nodeHealth = hm.nodeHealth + report.connectionStatus = ConnectionStatus.Disconnected + return report + + if hm.cachedProtocols.len == 0: + hm.cachedProtocols = hm.getSyncAllProtocolHealthInfo() + hm.connectionStatus = hm.calculateConnectionState() + + report.nodeHealth = 
HealthStatus.READY + report.connectionStatus = hm.connectionStatus + report.protocolsHealth = hm.cachedProtocols + return report + +proc onRelayMsg( + hm: NodeHealthMonitor, peer: PubSubPeer, msg: var RPCMsg +) {.gcsafe, raises: [].} = + ## Inspect Relay events for health-update relevance in Core (Relay) mode. + ## + ## For Core (Relay) mode, the connectivity health state is mostly determined + ## by the relay protocol state (it is the dominant factor), and we know + ## that a peer Relay can only affect this Relay's health if there is a + ## subscription change or a mesh (GRAFT/PRUNE) change. + ## + + if msg.subscriptions.len == 0: + if msg.control.isNone(): + return + let ctrl = msg.control.get() + if ctrl.graft.len == 0 and ctrl.prune.len == 0: + return + + hm.healthUpdateEvent.fire() + +proc healthLoop(hm: NodeHealthMonitor) {.async.} = + ## Re-evaluate the global health state of the node when notified of a potential change, + ## and call back the application if an actual change from the last notified state happened. 
+ info "Health monitor loop start" + while true: + try: + await hm.healthUpdateEvent.wait() + hm.healthUpdateEvent.clear() + + hm.cachedProtocols = await hm.getAllProtocolHealthInfo() + let newConnectionStatus = hm.calculateConnectionState() + + if newConnectionStatus != hm.connectionStatus: + debug "connectionStatus change", + oldstatus = hm.connectionStatus, newstatus = newConnectionStatus + + hm.connectionStatus = newConnectionStatus + + EventConnectionStatusChange.emit(hm.node.brokerCtx, newConnectionStatus) + + if not isNil(hm.onConnectionStatusChange): + await hm.onConnectionStatusChange(newConnectionStatus) + except CancelledError: + break + except Exception as e: + error "HealthMonitor: error in update loop", error = e.msg + + # safety cooldown to protect from edge cases + await sleepAsync(100.milliseconds) + + info "Health monitor loop end" + proc selectRandomPeersForKeepalive( node: WakuNode, outPeers: seq[PeerId], numRandomPeers: int ): Future[seq[PeerId]] {.async.} = ## Select peers for random keepalive, prioritizing mesh peers - if node.wakuRelay.isNil(): + if isNil(node.wakuRelay): return selectRandomPeers(outPeers, numRandomPeers) let meshPeers = node.wakuRelay.getPeersInMesh().valueOr: @@ -382,55 +681,67 @@ proc startKeepalive*( hm.keepAliveFut = hm.node.keepAliveLoop(randomPeersKeepalive, allPeersKeepalive) return ok() -proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} = - var report: HealthReport - report.nodeHealth = hm.nodeHealth - - if not hm.node.isNil(): - let relayHealth = hm.getRelayHealth() - report.protocolsHealth.add(relayHealth) - report.protocolsHealth.add(await hm.getRlnRelayHealth()) - report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health)) - report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health)) - report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health)) - report.protocolsHealth.add(hm.getStoreHealth()) - report.protocolsHealth.add(hm.getLegacyStoreHealth()) 
- report.protocolsHealth.add(hm.getPeerExchangeHealth()) - report.protocolsHealth.add(hm.getRendezvousHealth()) - report.protocolsHealth.add(hm.getMixHealth()) - - report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health)) - report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health)) - report.protocolsHealth.add(hm.getStoreClientHealth()) - report.protocolsHealth.add(hm.getLegacyStoreClientHealth()) - report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health)) - return report - -proc setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) = - hm.node = node - proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) = hm.nodeHealth = health proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] = hm.onlineMonitor.startOnlineMonitor() + + if isNil(hm.node.peerManager): + return err("startHealthMonitor: no node peerManager to monitor") + + if not isNil(hm.node.wakuRelay): + hm.relayObserver = PubSubObserver( + onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].} = + hm.onRelayMsg(peer, msgs) + ) + hm.node.wakuRelay.addObserver(hm.relayObserver) + + hm.peerEventListener = EventWakuPeer.listen( + hm.node.brokerCtx, + proc(evt: EventWakuPeer): Future[void] {.async: (raises: []), gcsafe.} = + ## Recompute health on any peer changing anything (join, leave, identify, metadata update) + hm.healthUpdateEvent.fire(), + ).valueOr: + return err("Failed to subscribe to peer events: " & error) + + hm.healthUpdateEvent = newAsyncEvent() + hm.healthUpdateEvent.fire() + + hm.healthLoopFut = hm.healthLoop() + hm.startKeepalive().isOkOr: return err("startHealthMonitor: failed starting keep alive: " & error) return ok() proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} = - if not hm.onlineMonitor.isNil(): + if not isNil(hm.onlineMonitor): await hm.onlineMonitor.stopOnlineMonitor() - if not hm.keepAliveFut.isNil(): + if not isNil(hm.keepAliveFut): await 
hm.keepAliveFut.cancelAndWait() + if not isNil(hm.healthLoopFut): + await hm.healthLoopFut.cancelAndWait() + + if hm.peerEventListener.id != 0: + EventWakuPeer.dropListener(hm.node.brokerCtx, hm.peerEventListener) + + if not isNil(hm.node.wakuRelay) and not isNil(hm.relayObserver): + hm.node.wakuRelay.removeObserver(hm.relayObserver) + proc new*( T: type NodeHealthMonitor, + node: WakuNode, dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], ): T = + let om = OnlineMonitor.init(dnsNameServers) + om.setPeerStoreToOnlineMonitor(node.switch.peerStore) + om.addOnlineStateObserver(node.peerManager.getOnlineStateObserver()) T( nodeHealth: INITIALIZING, - node: nil, - onlineMonitor: OnlineMonitor.init(dnsNameServers), + node: node, + onlineMonitor: om, + connectionStatus: ConnectionStatus.Disconnected, + strength: initTable[WakuProtocol, int](), ) diff --git a/waku/node/health_monitor/protocol_health.nim b/waku/node/health_monitor/protocol_health.nim index 7bacea94b..4479888c8 100644 --- a/waku/node/health_monitor/protocol_health.nim +++ b/waku/node/health_monitor/protocol_health.nim @@ -1,5 +1,8 @@ import std/[options, strformat] import ./health_status +import waku/common/waku_protocol + +export waku_protocol type ProtocolHealth* = object protocol*: string @@ -39,8 +42,7 @@ proc shuttingDown*(p: var ProtocolHealth): ProtocolHealth = proc `$`*(p: ProtocolHealth): string = return fmt"protocol: {p.protocol}, health: {p.health}, description: {p.desc}" -proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth = - let p = ProtocolHealth( - protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]() +proc init*(p: typedesc[ProtocolHealth], protocol: WakuProtocol): ProtocolHealth = + return ProtocolHealth( + protocol: $protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]() ) - return p diff --git a/waku/waku_relay/topic_health.nim b/waku/node/health_monitor/topic_health.nim similarity index 84% rename from 
waku/waku_relay/topic_health.nim rename to waku/node/health_monitor/topic_health.nim index 774abc584..5a1ea0a16 100644 --- a/waku/waku_relay/topic_health.nim +++ b/waku/node/health_monitor/topic_health.nim @@ -1,11 +1,12 @@ import chronos -import ../waku_core +import waku/waku_core type TopicHealth* = enum UNHEALTHY MINIMALLY_HEALTHY SUFFICIENTLY_HEALTHY + NOT_SUBSCRIBED proc `$`*(t: TopicHealth): string = result = @@ -13,6 +14,7 @@ proc `$`*(t: TopicHealth): string = of UNHEALTHY: "UnHealthy" of MINIMALLY_HEALTHY: "MinimallyHealthy" of SUFFICIENTLY_HEALTHY: "SufficientlyHealthy" + of NOT_SUBSCRIBED: "NotSubscribed" type TopicHealthChangeHandler* = proc( pubsubTopic: PubsubTopic, topicHealth: TopicHealth diff --git a/waku/node/kernel_api/lightpush.nim b/waku/node/kernel_api/lightpush.nim index f42cb146e..ffe2afdac 100644 --- a/waku/node/kernel_api/lightpush.nim +++ b/waku/node/kernel_api/lightpush.nim @@ -34,26 +34,27 @@ import logScope: topics = "waku node lightpush api" +const MountWithoutRelayError* = "cannot mount lightpush because relay is not mounted" + ## Waku lightpush proc mountLegacyLightPush*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit -) {.async.} = +): Future[Result[void, string]] {.async.} = info "mounting legacy light push" - let pushHandler = - if node.wakuRelay.isNil: - info "mounting legacy lightpush without relay (nil)" - legacy_lightpush_protocol.getNilPushHandler() + if node.wakuRelay.isNil(): + return err(MountWithoutRelayError) + + info "mounting legacy lightpush with relay" + let rlnPeer = + if node.wakuRlnRelay.isNil(): + info "mounting legacy lightpush without rln-relay" + none(WakuRLNRelay) else: - info "mounting legacy lightpush with relay" - let rlnPeer = - if isNil(node.wakuRlnRelay): - info "mounting legacy lightpush without rln-relay" - none(WakuRLNRelay) - else: - info "mounting legacy lightpush with rln-relay" - some(node.wakuRlnRelay) - legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, 
rlnPeer) + info "mounting legacy lightpush with rln-relay" + some(node.wakuRlnRelay) + let pushHandler = + legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) node.wakuLegacyLightPush = WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit)) @@ -64,6 +65,9 @@ proc mountLegacyLightPush*( node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec)) + info "legacy lightpush mounted successfully" + return ok() + proc mountLegacyLightPushClient*(node: WakuNode) = info "mounting legacy light push client" @@ -146,23 +150,21 @@ proc legacyLightpushPublish*( proc mountLightPush*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit -) {.async.} = +): Future[Result[void, string]] {.async.} = info "mounting light push" - let pushHandler = - if node.wakuRelay.isNil(): - info "mounting lightpush v2 without relay (nil)" - lightpush_protocol.getNilPushHandler() + if node.wakuRelay.isNil(): + return err(MountWithoutRelayError) + + info "mounting lightpush with relay" + let rlnPeer = + if node.wakuRlnRelay.isNil(): + info "mounting lightpush without rln-relay" + none(WakuRLNRelay) else: - info "mounting lightpush with relay" - let rlnPeer = - if isNil(node.wakuRlnRelay): - info "mounting lightpush without rln-relay" - none(WakuRLNRelay) - else: - info "mounting lightpush with rln-relay" - some(node.wakuRlnRelay) - lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) + info "mounting lightpush with rln-relay" + some(node.wakuRlnRelay) + let pushHandler = lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer) node.wakuLightPush = WakuLightPush.new( node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit) @@ -174,6 +176,9 @@ proc mountLightPush*( node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec)) + info "lightpush mounted successfully" + return ok() + proc mountLightPushClient*(node: WakuNode) = info "mounting light push 
client" @@ -188,7 +193,6 @@ proc lightpushPublishHandler( mixify: bool = false, ): Future[lightpush_protocol.WakuLightPushResult] {.async.} = let msgHash = pubsubTopic.computeMessageHash(message).to0xHex() - if not node.wakuLightpushClient.isNil(): notice "publishing message with lightpush", pubsubTopic = pubsubTopic, @@ -196,23 +200,23 @@ proc lightpushPublishHandler( target_peer_id = peer.peerId, msg_hash = msgHash, mixify = mixify - if mixify: #indicates we want to use mix to send the message - #TODO: How to handle multiple addresses? - let conn = node.wakuMix.toConnection( - MixDestination.init(peer.peerId, peer.addrs[0]), - WakuLightPushCodec, - MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))), - # indicating we only want a single path to be used for reply hence numSurbs = 1 - ).valueOr: - error "could not create mix connection" - return lighpushErrorResult( - LightPushErrorCode.SERVICE_NOT_AVAILABLE, - "Waku lightpush with mix not available", - ) + if defined(libp2p_mix_experimental_exit_is_dest) and mixify: + #indicates we want to use mix to send the message + when defined(libp2p_mix_experimental_exit_is_dest): + #TODO: How to handle multiple addresses? 
+ let conn = node.wakuMix.toConnection( + MixDestination.exitNode(peer.peerId), + WakuLightPushCodec, + MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))), + # indicating we only want a single path to be used for reply hence numSurbs = 1 + ).valueOr: + error "could not create mix connection" + return lighpushErrorResult( + LightPushErrorCode.SERVICE_NOT_AVAILABLE, + "Waku lightpush with mix not available", + ) - return await node.wakuLightpushClient.publishWithConn( - pubsubTopic, message, conn, peer.peerId - ) + return await node.wakuLightpushClient.publish(some(pubsubTopic), message, conn) else: return await node.wakuLightpushClient.publish(some(pubsubTopic), message, peer) @@ -261,7 +265,7 @@ proc lightpushPublish*( LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers" ) - let pubsubForPublish = pubSubTopic.valueOr: + let pubsubForPublish = pubsubTopic.valueOr: if node.wakuAutoSharding.isNone(): let msg = "Pubsub topic must be specified when static sharding is enabled" error "lightpush publish error", error = msg diff --git a/waku/node/kernel_api/relay.nim b/waku/node/kernel_api/relay.nim index 827cc1e5f..ec4d05ddd 100644 --- a/waku/node/kernel_api/relay.nim +++ b/waku/node/kernel_api/relay.nim @@ -19,16 +19,22 @@ import libp2p/utility import - ../waku_node, - ../../waku_relay, - ../../waku_core, - ../../waku_core/topics/sharding, - ../../waku_filter_v2, - ../../waku_archive_legacy, - ../../waku_archive, - ../../waku_store_sync, - ../peer_manager, - ../../waku_rln_relay + waku/[ + waku_relay, + waku_core, + waku_core/topics/sharding, + waku_filter_v2, + waku_archive_legacy, + waku_archive, + waku_store_sync, + waku_rln_relay, + node/waku_node, + node/peer_manager, + common/broker/broker_context, + events/message_events, + ] + +export waku_relay.WakuRelayHandler declarePublicHistogram waku_histogram_message_size, "message size histogram in kB", @@ -42,14 +48,25 @@ logScope: ## Waku relay proc registerRelayHandler( - node: WakuNode, 
topic: PubsubTopic, appHandler: WakuRelayHandler -) = + node: WakuNode, topic: PubsubTopic, appHandler: WakuRelayHandler = nil +): bool = ## Registers the only handler for the given topic. ## Notice that this handler internally calls other handlers, such as filter, ## archive, etc, plus the handler provided by the application. + ## Returns `true` if a mesh subscription was created or `false` if the relay + ## was already subscribed to the topic. - if node.wakuRelay.isSubscribed(topic): - return + let alreadySubscribed = node.wakuRelay.isSubscribed(topic) + + if not appHandler.isNil(): + if not alreadySubscribed or not node.legacyAppHandlers.hasKey(topic): + node.legacyAppHandlers[topic] = appHandler + else: + debug "Legacy appHandler already exists for active PubsubTopic, ignoring new handler", + topic = topic + + if alreadySubscribed: + return false proc traceHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = let msgSizeKB = msg.payload.len / 1000 @@ -80,6 +97,9 @@ proc registerRelayHandler( node.wakuStoreReconciliation.messageIngress(topic, msg) + proc internalHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + MessageSeenEvent.emit(node.brokerCtx, topic, msg) + let uniqueTopicHandler = proc( topic: PubsubTopic, msg: WakuMessage ): Future[void] {.async, gcsafe.} = @@ -87,43 +107,61 @@ proc registerRelayHandler( await filterHandler(topic, msg) await archiveHandler(topic, msg) await syncHandler(topic, msg) - await appHandler(topic, msg) + await internalHandler(topic, msg) + + # Call the legacy (kernel API) app handler if it exists. + # Normally, hasKey is false and the MessageSeenEvent bus (new API) is used instead. + # But we need to support legacy behavior (kernel API use), hence this. + # NOTE: We can delete `legacyAppHandlers` if instead we refactor WakuRelay to support multiple + # PubsubTopic handlers, since that's actually supported by libp2p PubSub (bigger refactor...) 
+ if node.legacyAppHandlers.hasKey(topic) and not node.legacyAppHandlers[topic].isNil(): + await node.legacyAppHandlers[topic](topic, msg) node.wakuRelay.subscribe(topic, uniqueTopicHandler) +proc getTopicOfSubscriptionEvent( + node: WakuNode, subscription: SubscriptionEvent +): Result[(PubsubTopic, Option[ContentTopic]), string] = + case subscription.kind + of ContentSub, ContentUnsub: + if node.wakuAutoSharding.isSome(): + let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: + return err("Autosharding error: " & error) + return ok(($shard, some(subscription.topic))) + else: + return + err("Static sharding is used, relay subscriptions must specify a pubsub topic") + of PubsubSub, PubsubUnsub: + return ok((subscription.topic, none[ContentTopic]())) + else: + return err("Unsupported subscription type in relay getTopicOfSubscriptionEvent") + proc subscribe*( node: WakuNode, subscription: SubscriptionEvent, handler: WakuRelayHandler ): Result[void, string] = ## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on ## this topic. WakuRelayHandler is a method that takes a topic and a Waku message. + ## If `handler` is nil, the API call will subscribe to the topic in the relay mesh + ## but no app handler will be registered at this time (it can be registered later with + ## another call to this proc for the same gossipsub topic). - if node.wakuRelay.isNil(): + if isNil(node.wakuRelay): error "Invalid API call to `subscribe`. WakuRelay not mounted." return err("Invalid API call to `subscribe`. 
WakuRelay not mounted.") - let (pubsubTopic, contentTopicOp) = - case subscription.kind - of ContentSub: - if node.wakuAutoSharding.isSome(): - let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: - error "Autosharding error", error = error - return err("Autosharding error: " & error) - ($shard, some(subscription.topic)) - else: - return err( - "Static sharding is used, relay subscriptions must specify a pubsub topic" - ) - of PubsubSub: - (subscription.topic, none(ContentTopic)) + let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr: + error "Failed to decode subscription event", error = error + return err("Failed to decode subscription event: " & error) + + if node.registerRelayHandler(pubsubTopic, handler): + info "subscribe", pubsubTopic, contentTopicOp + node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) + else: + if isNil(handler): + warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic else: - return err("Unsupported subscription type in relay subscribe") - - if node.wakuRelay.isSubscribed(pubsubTopic): - warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic - return ok() - - node.registerRelayHandler(pubsubTopic, handler) - node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) + info "subscribe (was already subscribed in the mesh; appHandler set)", + pubsubTopic = pubsubTopic return ok() @@ -131,41 +169,50 @@ proc unsubscribe*( node: WakuNode, subscription: SubscriptionEvent ): Result[void, string] = ## Unsubscribes from a specific PubSub or Content topic. + ## This will both unsubscribe from the relay mesh and remove the app handler, if any. + ## NOTE: This works because using MAPI and Kernel API at the same time is unsupported. - if node.wakuRelay.isNil(): + if isNil(node.wakuRelay): error "Invalid API call to `unsubscribe`. WakuRelay not mounted." return err("Invalid API call to `unsubscribe`. 
WakuRelay not mounted.") - let (pubsubTopic, contentTopicOp) = - case subscription.kind - of ContentUnsub: - if node.wakuAutoSharding.isSome(): - let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: - error "Autosharding error", error = error - return err("Autosharding error: " & error) - ($shard, some(subscription.topic)) - else: - return err( - "Static sharding is used, relay subscriptions must specify a pubsub topic" - ) - of PubsubUnsub: - (subscription.topic, none(ContentTopic)) + let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr: + error "Failed to decode unsubscribe event", error = error + return err("Failed to decode unsubscribe event: " & error) + + let hadHandler = node.legacyAppHandlers.hasKey(pubsubTopic) + if hadHandler: + node.legacyAppHandlers.del(pubsubTopic) + + if node.wakuRelay.isSubscribed(pubsubTopic): + info "unsubscribe", pubsubTopic, contentTopicOp + node.wakuRelay.unsubscribe(pubsubTopic) + node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) + else: + if not hadHandler: + warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic else: - return err("Unsupported subscription type in relay unsubscribe") - - if not node.wakuRelay.isSubscribed(pubsubTopic): - warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic - return ok() - - info "unsubscribe", pubsubTopic, contentTopicOp - node.wakuRelay.unsubscribe(pubsubTopic) - node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) + info "unsubscribe (was not subscribed in the mesh; appHandler removed)", + pubsubTopic = pubsubTopic return ok() +proc isSubscribed*( + node: WakuNode, subscription: SubscriptionEvent +): Result[bool, string] = + if node.wakuRelay.isNil(): + error "Invalid API call to `isSubscribed`. WakuRelay not mounted." + return err("Invalid API call to `isSubscribed`. 
WakuRelay not mounted.") + + let (pubsubTopic, contentTopicOp) = getTopicOfSubscriptionEvent(node, subscription).valueOr: + error "Failed to decode subscription event", error = error + return err("Failed to decode subscription event: " & error) + + return ok(node.wakuRelay.isSubscribed(pubsubTopic)) + proc publish*( node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage -): Future[Result[void, string]] {.async, gcsafe.} = +): Future[Result[int, string]] {.async, gcsafe.} = ## Publish a `WakuMessage`. Pubsub topic contains; none, a named or static shard. ## `WakuMessage` should contain a `contentTopic` field for light node functionality. ## It is also used to determine the shard. @@ -184,16 +231,20 @@ proc publish*( let msg = "Autosharding error: " & error return err(msg) - #TODO instead of discard return error when 0 peers received the message - discard await node.wakuRelay.publish(pubsubTopic, message) + let numPeers = (await node.wakuRelay.publish(pubsubTopic, message)).valueOr: + warn "waku.relay did not publish", error = error + # Todo: If NoPeersToPublish, we might want to return ok(0) instead!!! 
+ return err("publish failed in relay: " & $error) notice "waku.relay published", peerId = node.peerId, pubsubTopic = pubsubTopic, msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), - publishTime = getNowInNanosecondTime() + publishTime = getNowInNanosecondTime(), + numPeers = numPeers - return ok() + # TODO: investigate if we can return error in case numPeers is 0 + ok(numPeers) proc mountRelay*( node: WakuNode, diff --git a/waku/node/peer_manager/peer_manager.nim b/waku/node/peer_manager/peer_manager.nim index 72b526aca..0c435468f 100644 --- a/waku/node/peer_manager/peer_manager.nim +++ b/waku/node/peer_manager/peer_manager.nim @@ -1,27 +1,31 @@ {.push raises: [].} import - std/[options, sets, sequtils, times, strformat, strutils, math, random, tables], + std/ + [ + options, sets, sequtils, times, strformat, strutils, math, random, tables, + algorithm, + ], chronos, chronicles, metrics, - libp2p/multistream, - libp2p/muxers/muxer, - libp2p/nameresolving/nameresolver, - libp2p/peerstore - -import - ../../common/nimchronos, - ../../common/enr, - ../../common/callbacks, - ../../common/utils/parse_size_units, - ../../waku_core, - ../../waku_relay, - ../../waku_relay/protocol, - ../../waku_enr/sharding, - ../../waku_enr/capabilities, - ../../waku_metadata, - ../health_monitor/online_monitor, + libp2p/[multistream, muxers/muxer, nameresolving/nameresolver, peerstore], + waku/[ + waku_core, + waku_relay, + waku_metadata, + waku_core/topics/sharding, + waku_relay/protocol, + waku_enr/sharding, + waku_enr/capabilities, + events/peer_events, + common/nimchronos, + common/enr, + common/callbacks, + common/utils/parse_size_units, + common/broker/broker_context, + node/health_monitor/online_monitor, + ], ./peer_store/peer_storage, ./waku_peer_store @@ -84,6 +88,7 @@ type ConnectionChangeHandler* = proc( ): Future[void] {.gcsafe, raises: [Defect].} type PeerManager* = ref object of RootObj + brokerCtx: BrokerContext switch*: Switch wakuMetadata*: WakuMetadata 
initialBackoffInSec*: int @@ -103,6 +108,7 @@ type PeerManager* = ref object of RootObj onConnectionChange*: ConnectionChangeHandler online: bool ## state managed by online_monitor module getShards: GetShards + maxConnections: int #~~~~~~~~~~~~~~~~~~~# # Helper Functions # @@ -221,7 +227,19 @@ proc selectPeer*( protocol = proto, peers, address = cast[uint](pm.switch.peerStore) if shard.isSome(): - peers.keepItIf((it.enr.isSome() and it.enr.get().containsShard(shard.get()))) + # Parse the shard from the pubsub topic to get cluster and shard ID + let shardInfo = RelayShard.parse(shard.get()).valueOr: + trace "Failed to parse shard from pubsub topic", topic = shard.get() + return none(RemotePeerInfo) + + # Filter peers that support the requested shard + # Check both ENR (if present) and the shards field on RemotePeerInfo + peers.keepItIf( + # Check ENR if available + (it.enr.isSome() and it.enr.get().containsShard(shard.get())) or + # Otherwise check the shards field directly + (it.shards.len > 0 and it.shards.contains(shardInfo.shardId)) + ) shuffle(peers) @@ -482,8 +500,9 @@ proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool = proc connectedPeers*( pm: PeerManager, protocol: string = "" ): (seq[PeerId], seq[PeerId]) = - ## Returns the peerIds of physical connections (in and out) - ## If a protocol is specified, only returns peers with at least one stream of that protocol + ## Returns the PeerIds of peers with an active socket connection. + ## If a protocol is specified, it returns peers that currently have one + ## or more active logical streams for that protocol. var inPeers: seq[PeerId] var outPeers: seq[PeerId] @@ -499,6 +518,65 @@ proc connectedPeers*( return (inPeers, outPeers) +proc capablePeers*(pm: PeerManager, protocol: string): (seq[PeerId], seq[PeerId]) = + ## Returns the PeerIds of peers with an active socket connection. + ## If a protocol is specified, it returns peers that have identified + ## themselves as supporting the protocol. 
+ + var inPeers: seq[PeerId] + var outPeers: seq[PeerId] + + for peerId, muxers in pm.switch.connManager.getConnections(): + # filter out peers that don't have the capability registered in the peer store + if pm.switch.peerStore.hasPeer(peerId, protocol): + for peerConn in muxers: + if peerConn.connection.transportDir == Direction.In: + inPeers.add(peerId) + elif peerConn.connection.transportDir == Direction.Out: + outPeers.add(peerId) + + return (inPeers, outPeers) + +proc getConnectedPeersCount*(pm: PeerManager, protocol: string): int = + ## Returns the total number of unique connected peers (inbound + outbound) + ## with active streams for a specific protocol. + let (inPeers, outPeers) = pm.connectedPeers(protocol) + var peers = initHashSet[PeerId](nextPowerOfTwo(inPeers.len + outPeers.len)) + for p in inPeers: + peers.incl(p) + for p in outPeers: + peers.incl(p) + return peers.len + +proc getCapablePeersCount*(pm: PeerManager, protocol: string): int = + ## Returns the total number of unique connected peers (inbound + outbound) + ## who have identified themselves as supporting the given protocol. + let (inPeers, outPeers) = pm.capablePeers(protocol) + var peers = initHashSet[PeerId](nextPowerOfTwo(inPeers.len + outPeers.len)) + for p in inPeers: + peers.incl(p) + for p in outPeers: + peers.incl(p) + return peers.len + +proc getPeersForShard*(pm: PeerManager, protocolId: string, shard: PubsubTopic): int = + let (inPeers, outPeers) = pm.connectedPeers(protocolId) + let connectedProtocolPeers = inPeers & outPeers + if connectedProtocolPeers.len == 0: + return 0 + + let shardInfo = RelayShard.parse(shard).valueOr: + # count raw peers of the given protocol if for some reason we can't get + # a shard mapping out of the gossipsub topic string. 
+ return connectedProtocolPeers.len + + var shardPeers = 0 + for peerId in connectedProtocolPeers: + if pm.switch.peerStore.hasShard(peerId, shardInfo.clusterId, shardInfo.shardId): + shardPeers.inc() + + return shardPeers + proc disconnectAllPeers*(pm: PeerManager) {.async.} = let (inPeerIds, outPeerIds) = pm.connectedPeers() let connectedPeers = concat(inPeerIds, outPeerIds) @@ -634,7 +712,7 @@ proc getPeerIp(pm: PeerManager, peerId: PeerId): Option[string] = # Event Handling # #~~~~~~~~~~~~~~~~~# -proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = +proc refreshPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = let res = catch: await pm.switch.dial(peerId, WakuMetadataCodec) @@ -658,6 +736,15 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = $clusterId break guardClauses + # Store the shard information from metadata in the peer store + if pm.switch.peerStore.peerExists(peerId): + let shards = metadata.shards.mapIt(it.uint16) + pm.switch.peerStore.setShardInfo(peerId, shards) + + # TODO: should only trigger an event if metadata actually changed + # should include the shard subscription delta in the event when + # it is a MetadataUpdated event + EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventMetadataUpdated) return info "disconnecting from peer", peerId = peerId, reason = reason @@ -667,14 +754,14 @@ proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = # called when a peer i) first connects to us ii) disconnects all connections from us proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = if not pm.wakuMetadata.isNil() and event.kind == PeerEventKind.Joined: - await pm.onPeerMetadata(peerId) + await pm.refreshPeerMetadata(peerId) var peerStore = pm.switch.peerStore var direction: PeerDirection var connectedness: Connectedness case event.kind - of Joined: + of PeerEventKind.Joined: direction = if event.initiator: Outbound else: Inbound connectedness = Connected @@ -702,10 
+789,12 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = asyncSpawn(pm.switch.disconnect(peerId)) peerStore.delete(peerId) + EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventConnected) + if not pm.onConnectionChange.isNil(): # we don't want to await for the callback to finish asyncSpawn pm.onConnectionChange(peerId, Joined) - of Left: + of PeerEventKind.Left: direction = UnknownDirection connectedness = CanConnect @@ -717,12 +806,16 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = pm.ipTable.del(ip) break + EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventDisconnected) + if not pm.onConnectionChange.isNil(): # we don't want to await for the callback to finish asyncSpawn pm.onConnectionChange(peerId, Left) - of Identified: + of PeerEventKind.Identified: info "event identified", peerId = peerId + EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventIdentified) + peerStore[ConnectionBook][peerId] = connectedness peerStore[DirectionBook][peerId] = direction @@ -743,7 +836,6 @@ proc logAndMetrics(pm: PeerManager) {.async.} = var peerStore = pm.switch.peerStore # log metrics let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) - let maxConnections = pm.switch.connManager.inSema.size let notConnectedPeers = peerStore.getDisconnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) @@ -753,7 +845,7 @@ proc logAndMetrics(pm: PeerManager) {.async.} = info "Relay peer connections", inRelayConns = $inRelayPeers.len & "/" & $pm.inRelayPeersTarget, outRelayConns = $outRelayPeers.len & "/" & $pm.outRelayPeersTarget, - totalConnections = $totalConnections & "/" & $maxConnections, + totalConnections = $totalConnections & "/" & $pm.maxConnections, notConnectedPeers = notConnectedPeers.len, outsideBackoffPeers = outsideBackoffPeers.len @@ -1036,16 +1128,16 @@ proc new*( 
wakuMetadata: WakuMetadata = nil, maxRelayPeers: Option[int] = none(int), maxServicePeers: Option[int] = none(int), - relayServiceRatio: string = "60:40", + relayServiceRatio: string = "50:50", storage: PeerStorage = nil, initialBackoffInSec = InitialBackoffInSec, backoffFactor = BackoffFactor, maxFailedAttempts = MaxFailedAttempts, colocationLimit = DefaultColocationLimit, shardedPeerManagement = false, + maxConnections: int = MaxConnections, ): PeerManager {.gcsafe.} = let capacity = switch.peerStore.capacity - let maxConnections = switch.connManager.inSema.size if maxConnections > capacity: error "Max number of connections can't be greater than PeerManager capacity", capacity = capacity, maxConnections = maxConnections @@ -1080,8 +1172,11 @@ proc new*( error "Max backoff time can't be over 1 week", maxBackoff = backoff raise newException(Defect, "Max backoff time can't be over 1 week") + let brokerCtx = globalBrokerContext() + let pm = PeerManager( switch: switch, + brokerCtx: brokerCtx, wakuMetadata: wakuMetadata, storage: storage, initialBackoffInSec: initialBackoffInSec, @@ -1094,6 +1189,7 @@ proc new*( colocationLimit: colocationLimit, shardedPeerManagement: shardedPeerManagement, online: true, + maxConnections: maxConnections, ) proc peerHook( diff --git a/waku/node/peer_manager/waku_peer_store.nim b/waku/node/peer_manager/waku_peer_store.nim index 2653153bf..93ac9ad2e 100644 --- a/waku/node/peer_manager/waku_peer_store.nim +++ b/waku/node/peer_manager/waku_peer_store.nim @@ -6,7 +6,8 @@ import chronicles, eth/p2p/discoveryv5/enr, libp2p/builders, - libp2p/peerstore + libp2p/peerstore, + libp2p/crypto/curve25519 import ../../waku_core, @@ -39,6 +40,9 @@ type # Keeps track of the ENR (Ethereum Node Record) of a peer ENRBook* = ref object of PeerBook[enr.Record] + # Keeps track of peer shards + ShardBook* = ref object of PeerBook[seq[uint16]] + proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo = let addresses = if 
peerStore[LastSeenBook][peerId].isSome(): @@ -55,6 +59,7 @@ proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo = else: none(enr.Record), protocols: peerStore[ProtoBook][peerId], + shards: peerStore[ShardBook][peerId], agent: peerStore[AgentBook][peerId], protoVersion: peerStore[ProtoVersionBook][peerId], publicKey: peerStore[KeyBook][peerId], @@ -64,6 +69,11 @@ proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo = direction: peerStore[DirectionBook][peerId], lastFailedConn: peerStore[LastFailedConnBook][peerId], numberFailedConn: peerStore[NumberFailedConnBook][peerId], + mixPubKey: + if peerStore[MixPubKeyBook][peerId] != default(Curve25519Key): + some(peerStore[MixPubKeyBook][peerId]) + else: + none(Curve25519Key), ) proc delete*(peerStore: PeerStore, peerId: PeerId) = @@ -72,16 +82,24 @@ proc delete*(peerStore: PeerStore, peerId: PeerId) = proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] = let allKeys = concat( - toSeq(peerStore[LastSeenBook].book.keys()), + toSeq(peerStore[LastSeenOutboundBook].book.keys()), toSeq(peerStore[AddressBook].book.keys()), toSeq(peerStore[ProtoBook].book.keys()), toSeq(peerStore[KeyBook].book.keys()), + toSeq(peerStore[ShardBook].book.keys()), ) .toHashSet() return allKeys.mapIt(peerStore.getPeer(it)) proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin) = + ## Storing MixPubKey even if peer is already present as this info might be new + ## or updated. + if peer.mixPubKey.isSome(): + trace "adding mix pub key to peer store", + peer_id = $peer.peerId, mix_pub_key = $peer.mixPubKey.get() + peerStore[MixPubKeyBook].book[peer.peerId] = peer.mixPubKey.get() + ## Notice that the origin parameter is used to manually override the given peer origin. ## At the time of writing, this is used in waku_discv5 or waku_node (peer exchange.) 
if peerStore[AddressBook][peer.peerId] == peer.addrs and @@ -108,6 +126,7 @@ proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin peerStore[ProtoBook][peer.peerId] = protos ## We don't care whether the item was already present in the table or not. Hence, we always discard the hasKeyOrPut's bool returned value + discard peerStore[AgentBook].book.hasKeyOrPut(peer.peerId, peer.agent) discard peerStore[ProtoVersionBook].book.hasKeyOrPut(peer.peerId, peer.protoVersion) discard peerStore[KeyBook].book.hasKeyOrPut(peer.peerId, peer.publicKey) @@ -127,6 +146,9 @@ proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin if peer.enr.isSome(): peerStore[ENRBook][peer.peerId] = peer.enr.get() +proc setShardInfo*(peerStore: PeerStore, peerId: PeerID, shards: seq[uint16]) = + peerStore[ShardBook][peerId] = shards + proc peers*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] = peerStore.peers().filterIt(it.protocols.contains(proto)) @@ -137,7 +159,9 @@ proc connectedness*(peerStore: PeerStore, peerId: PeerId): Connectedness = peerStore[ConnectionBook].book.getOrDefault(peerId, NotConnected) proc hasShard*(peerStore: PeerStore, peerId: PeerID, cluster, shard: uint16): bool = - peerStore[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard) + return + peerStore[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard) or + peerStore[ShardBook].book.getOrDefault(peerId, @[]).contains(shard) proc hasCapability*(peerStore: PeerStore, peerId: PeerID, cap: Capabilities): bool = peerStore[ENRBook].book.getOrDefault(peerId).supportsCapability(cap) @@ -194,7 +218,8 @@ proc getPeersByShard*( peerStore: PeerStore, cluster, shard: uint16 ): seq[RemotePeerInfo] = return peerStore.peers.filterIt( - it.enr.isSome() and it.enr.get().containsShard(cluster, shard) + (it.enr.isSome() and it.enr.get().containsShard(cluster, shard)) or + it.shards.contains(shard) ) proc getPeersByCapability*( diff --git 
a/waku/node/waku_node.nim b/waku/node/waku_node.nim index ffc2acaa4..0cef4cc5d 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -22,40 +22,55 @@ import libp2p/transports/tcptransport, libp2p/transports/wstransport, libp2p/utility, + libp2p/utils/offsettedseq, libp2p/protocols/mix, libp2p/protocols/mix/mix_protocol import - ../waku_core, - ../waku_core/topics/sharding, - ../waku_relay, - ../waku_archive, - ../waku_archive_legacy, - ../waku_store_legacy/protocol as legacy_store, - ../waku_store_legacy/client as legacy_store_client, - ../waku_store_legacy/common as legacy_store_common, - ../waku_store/protocol as store, - ../waku_store/client as store_client, - ../waku_store/common as store_common, - ../waku_store/resume, - ../waku_store_sync, - ../waku_filter_v2, - ../waku_filter_v2/client as filter_client, - ../waku_metadata, - ../waku_rendezvous/protocol, - ../waku_lightpush_legacy/client as legacy_ligntpuhs_client, - ../waku_lightpush_legacy as legacy_lightpush_protocol, - ../waku_lightpush/client as ligntpuhs_client, - ../waku_lightpush as lightpush_protocol, - ../waku_enr, - ../waku_peer_exchange, - ../waku_rln_relay, + waku/[ + waku_core, + waku_core/topics/sharding, + waku_relay, + waku_archive, + waku_archive_legacy, + waku_store_legacy/protocol as legacy_store, + waku_store_legacy/client as legacy_store_client, + waku_store_legacy/common as legacy_store_common, + waku_store/protocol as store, + waku_store/client as store_client, + waku_store/common as store_common, + waku_store/resume, + waku_store_sync, + waku_filter_v2, + waku_filter_v2/common as filter_common, + waku_filter_v2/client as filter_client, + waku_metadata, + waku_rendezvous/protocol, + waku_rendezvous/client as rendezvous_client, + waku_rendezvous/waku_peer_record, + waku_lightpush_legacy/client as legacy_ligntpuhs_client, + waku_lightpush_legacy as legacy_lightpush_protocol, + waku_lightpush/client as ligntpuhs_client, + waku_lightpush as lightpush_protocol, + waku_enr, + 
waku_peer_exchange, + waku_rln_relay, + common/rate_limit/setting, + common/callbacks, + common/nimchronos, + common/broker/broker_context, + common/broker/request_broker, + waku_mix, + requests/node_requests, + requests/health_requests, + events/health_events, + events/peer_events, + ], + waku/discovery/waku_kademlia, ./net_config, ./peer_manager, - ../common/rate_limit/setting, - ../common/callbacks, - ../common/nimchronos, - ../waku_mix + ./health_monitor/health_status, + ./health_monitor/topic_health declarePublicCounter waku_node_messages, "number of messages received", ["type"] @@ -84,6 +99,9 @@ const clientId* = "Nimbus Waku v2 node" const WakuNodeVersionString* = "version / git commit hash: " & git_version +const EdgeTopicHealthyThreshold = 2 + ## Lightpush server and filter server requirement for a healthy topic in edge mode + # key and crypto modules different type # TODO: Move to application instance (e.g., `WakuNode2`) @@ -120,27 +138,63 @@ type enr*: enr.Record libp2pPing*: Ping rng*: ref rand.HmacDrbgContext + brokerCtx*: BrokerContext wakuRendezvous*: WakuRendezVous + wakuRendezvousClient*: rendezvous_client.WakuRendezVousClient announcedAddresses*: seq[MultiAddress] + extMultiAddrsOnly*: bool # When true, skip automatic IP address replacement started*: bool # Indicates that node has started listening topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent] rateLimitSettings*: ProtocolRateLimitSettings + legacyAppHandlers*: Table[PubsubTopic, WakuRelayHandler] + ## Kernel API Relay appHandlers (if any) wakuMix*: WakuMix + edgeTopicsHealth*: Table[PubsubTopic, TopicHealth] + edgeHealthEvent*: AsyncEvent + edgeHealthLoop: Future[void] + peerEventListener*: EventWakuPeerListener + kademliaDiscoveryLoop*: Future[void] + wakuKademlia*: WakuKademlia -proc getShardsGetter(node: WakuNode): GetShards = +proc deduceRelayShard( + node: WakuNode, + contentTopic: ContentTopic, + pubsubTopicOp: Option[PubsubTopic] = none[PubsubTopic](), +): Result[RelayShard, 
string] = + let pubsubTopic = pubsubTopicOp.valueOr: + if node.wakuAutoSharding.isNone(): + return err("Pubsub topic must be specified when static sharding is enabled.") + let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr: + let msg = "Deducing shard failed: " & error + return err(msg) + return ok(shard) + + let shard = RelayShard.parse(pubsubTopic).valueOr: + return err("Invalid topic:" & pubsubTopic & " " & $error) + return ok(shard) + +proc getShardsGetter(node: WakuNode, configuredShards: seq[uint16]): GetShards = return proc(): seq[uint16] {.closure, gcsafe, raises: [].} = # fetch pubsubTopics subscribed to relay and convert them to shards if node.wakuRelay.isNil(): - return @[] + # If relay is not mounted, return configured shards + return configuredShards + let subscribedTopics = node.wakuRelay.subscribedTopics() + + # If relay hasn't subscribed to any topics yet, return configured shards + if subscribedTopics.len == 0: + return configuredShards + let relayShards = topicsToRelayShards(subscribedTopics).valueOr: error "could not convert relay topics to shards", error = $error, topics = subscribedTopics - return @[] + # Fall back to configured shards on error + return configuredShards if relayShards.isSome(): let shards = relayShards.get().shardIds return shards - return @[] + return configuredShards proc getCapabilitiesGetter(node: WakuNode): GetCapabilities = return proc(): seq[Capabilities] {.closure, gcsafe, raises: [].} = @@ -148,6 +202,17 @@ proc getCapabilitiesGetter(node: WakuNode): GetCapabilities = return @[] return node.enr.getCapabilities() +proc getWakuPeerRecordGetter(node: WakuNode): GetWakuPeerRecord = + return proc(): WakuPeerRecord {.closure, gcsafe, raises: [].} = + var mixKey: string + if not node.wakuMix.isNil(): + mixKey = node.wakuMix.pubKey.to0xHex() + return WakuPeerRecord.init( + peerId = node.switch.peerInfo.peerId, + addresses = node.announcedAddresses, + mixKey = mixKey, + ) + proc new*( T: type WakuNode, 
netConfig: NetConfig, @@ -162,18 +227,21 @@ proc new*( info "Initializing networking", addrs = $netConfig.announcedAddresses + let brokerCtx = globalBrokerContext() + let queue = newAsyncEventQueue[SubscriptionEvent](0) let node = WakuNode( peerManager: peerManager, switch: switch, rng: rng, + brokerCtx: brokerCtx, enr: enr, announcedAddresses: netConfig.announcedAddresses, topicSubscriptionQueue: queue, rateLimitSettings: rateLimitSettings, ) - peerManager.setShardGetter(node.getShardsGetter()) + peerManager.setShardGetter(node.getShardsGetter(@[])) return node @@ -218,7 +286,7 @@ proc mountMetadata*( if not node.wakuMetadata.isNil(): return err("Waku metadata already mounted, skipping") - let metadata = WakuMetadata.new(clusterId, node.getShardsGetter()) + let metadata = WakuMetadata.new(clusterId, node.getShardsGetter(shards)) node.wakuMetadata = metadata node.peerManager.wakuMetadata = metadata @@ -237,10 +305,11 @@ proc mountAutoSharding*( info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount node.wakuAutoSharding = some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount)) + return ok() proc getMixNodePoolSize*(node: WakuNode): int = - return node.wakuMix.getNodePoolSize() + return node.wakuMix.poolSize() proc mountMix*( node: WakuNode, @@ -257,12 +326,12 @@ proc mountMix*( return err("Failed to convert multiaddress to string.") info "local addr", localaddr = localaddrStr - let nodeAddr = localaddrStr & "/p2p/" & $node.peerId node.wakuMix = WakuMix.new( - nodeAddr, node.peerManager, clusterId, mixPrivKey, mixnodes + localaddrStr, node.peerManager, clusterId, mixPrivKey, mixnodes ).valueOr: error "Waku Mix protocol initialization failed", err = error return + #TODO: should we do the below only for exit node? Also, what if multiple protocols use mix? 
node.wakuMix.registerDestReadBehavior(WakuLightPushCodec, readLp(int(-1))) let catchRes = catch: node.switch.mount(node.wakuMix) @@ -346,15 +415,32 @@ proc selectRandomPeers*(peers: seq[PeerId], numRandomPeers: int): seq[PeerId] = shuffle(randomPeers) return randomPeers[0 ..< min(len(randomPeers), numRandomPeers)] -proc mountRendezvous*(node: WakuNode, clusterId: uint16) {.async: (raises: []).} = +proc mountRendezvousClient*(node: WakuNode, clusterId: uint16) {.async: (raises: []).} = + info "mounting rendezvous client" + + node.wakuRendezvousClient = rendezvous_client.WakuRendezVousClient.new( + node.switch, node.peerManager, clusterId + ).valueOr: + error "initializing waku rendezvous client failed", error = error + return + + if node.started: + await node.wakuRendezvousClient.start() + +proc mountRendezvous*( + node: WakuNode, clusterId: uint16, shards: seq[RelayShard] = @[] +) {.async: (raises: []).} = info "mounting rendezvous discovery protocol" + let configuredShards = shards.mapIt(it.shardId) + node.wakuRendezvous = WakuRendezVous.new( node.switch, node.peerManager, clusterId, - node.getShardsGetter(), + node.getShardsGetter(configuredShards), node.getCapabilitiesGetter(), + node.getWakuPeerRecordGetter(), ).valueOr: error "initializing waku rendezvous failed", error = error return @@ -362,6 +448,11 @@ proc mountRendezvous*(node: WakuNode, clusterId: uint16) {.async: (raises: []).} if node.started: await node.wakuRendezvous.start() + try: + node.switch.mount(node.wakuRendezvous, protocolMatcher(WakuRendezVousCodec)) + except LPError: + error "failed to mount wakuRendezvous", error = getCurrentExceptionMsg() + proc isBindIpWithZeroPort(inputMultiAdd: MultiAddress): bool = let inputStr = $inputMultiAdd if inputStr.contains("0.0.0.0/tcp/0") or inputStr.contains("127.0.0.1/tcp/0"): @@ -370,6 +461,11 @@ proc isBindIpWithZeroPort(inputMultiAdd: MultiAddress): bool = return false proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string] = + 
# Skip automatic IP replacement if extMultiAddrsOnly is set + # This respects the user's explicitly configured announced addresses + if node.extMultiAddrsOnly: + return ok() + let peerInfo = node.switch.peerInfo var announcedStr = "" var listenStr = "" @@ -410,6 +506,118 @@ proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string] return ok() +proc calculateEdgeTopicHealth(node: WakuNode, shard: PubsubTopic): TopicHealth = + let filterPeers = + node.peerManager.getPeersForShard(filter_common.WakuFilterSubscribeCodec, shard) + let lightpushPeers = + node.peerManager.getPeersForShard(lightpush_protocol.WakuLightPushCodec, shard) + + if filterPeers >= EdgeTopicHealthyThreshold and + lightpushPeers >= EdgeTopicHealthyThreshold: + return TopicHealth.SUFFICIENTLY_HEALTHY + elif filterPeers > 0 and lightpushPeers > 0: + return TopicHealth.MINIMALLY_HEALTHY + + return TopicHealth.UNHEALTHY + +proc loopEdgeHealth(node: WakuNode) {.async.} = + while node.started: + await node.edgeHealthEvent.wait() + node.edgeHealthEvent.clear() + + try: + for shard in node.edgeTopicsHealth.keys: + if not node.wakuRelay.isNil and node.wakuRelay.isSubscribed(shard): + continue + + let oldHealth = node.edgeTopicsHealth.getOrDefault(shard, TopicHealth.UNHEALTHY) + let newHealth = node.calculateEdgeTopicHealth(shard) + if newHealth != oldHealth: + node.edgeTopicsHealth[shard] = newHealth + EventShardTopicHealthChange.emit(node.brokerCtx, shard, newHealth) + except CancelledError: + break + except CatchableError as e: + warn "Error in edge health check", error = e.msg + + # safety cooldown to protect from edge cases + await sleepAsync(100.milliseconds) + +proc startProvidersAndListeners*(node: WakuNode) = + node.peerEventListener = EventWakuPeer.listen( + node.brokerCtx, + proc(evt: EventWakuPeer) {.async: (raises: []), gcsafe.} = + node.edgeHealthEvent.fire(), + ).valueOr: + error "Failed to listen to peer events", error = error + return + + RequestRelayShard.setProvider( 
+ node.brokerCtx, + proc( + pubsubTopic: Option[PubsubTopic], contentTopic: ContentTopic + ): Result[RequestRelayShard, string] = + let shard = node.deduceRelayShard(contentTopic, pubsubTopic).valueOr: + return err($error) + return ok(RequestRelayShard(relayShard: shard)), + ).isOkOr: + error "Can't set provider for RequestRelayShard", error = error + + RequestShardTopicsHealth.setProvider( + node.brokerCtx, + proc(topics: seq[PubsubTopic]): Result[RequestShardTopicsHealth, string] = + var response: RequestShardTopicsHealth + + for shard in topics: + var healthStatus = TopicHealth.UNHEALTHY + + if not node.wakuRelay.isNil: + healthStatus = + node.wakuRelay.topicsHealth.getOrDefault(shard, TopicHealth.NOT_SUBSCRIBED) + + if healthStatus == TopicHealth.NOT_SUBSCRIBED: + healthStatus = node.calculateEdgeTopicHealth(shard) + + response.topicHealth.add((shard, healthStatus)) + + return ok(response), + ).isOkOr: + error "Can't set provider for RequestShardTopicsHealth", error = error + + RequestContentTopicsHealth.setProvider( + node.brokerCtx, + proc(topics: seq[ContentTopic]): Result[RequestContentTopicsHealth, string] = + var response: RequestContentTopicsHealth + + for contentTopic in topics: + var topicHealth = TopicHealth.NOT_SUBSCRIBED + + let shardResult = node.deduceRelayShard(contentTopic, none[PubsubTopic]()) + + if shardResult.isOk(): + let shardObj = shardResult.get() + let pubsubTopic = $shardObj + if not isNil(node.wakuRelay): + topicHealth = node.wakuRelay.topicsHealth.getOrDefault( + pubsubTopic, TopicHealth.NOT_SUBSCRIBED + ) + + if topicHealth == TopicHealth.NOT_SUBSCRIBED and + pubsubTopic in node.edgeTopicsHealth: + topicHealth = node.calculateEdgeTopicHealth(pubsubTopic) + + response.contentTopicHealth.add((topic: contentTopic, health: topicHealth)) + + return ok(response), + ).isOkOr: + error "Can't set provider for RequestContentTopicsHealth", error = error + +proc stopProvidersAndListeners*(node: WakuNode) = + 
EventWakuPeer.dropListener(node.brokerCtx, node.peerEventListener) + RequestRelayShard.clearProvider(node.brokerCtx) + RequestContentTopicsHealth.clearProvider(node.brokerCtx) + RequestShardTopicsHealth.clearProvider(node.brokerCtx) + proc start*(node: WakuNode) {.async.} = ## Starts a created Waku Node and ## all its mounted protocols. @@ -438,6 +646,9 @@ proc start*(node: WakuNode) {.async.} = if not node.wakuRendezvous.isNil(): await node.wakuRendezvous.start() + if not node.wakuRendezvousClient.isNil(): + await node.wakuRendezvousClient.start() + if not node.wakuStoreReconciliation.isNil(): node.wakuStoreReconciliation.start() @@ -455,6 +666,11 @@ proc start*(node: WakuNode) {.async.} = ## The switch will update addresses after start using the addressMapper await node.switch.start() + node.edgeHealthEvent = newAsyncEvent() + node.edgeHealthLoop = loopEdgeHealth(node) + + node.startProvidersAndListeners() + node.started = true if not zeroPortPresent: @@ -467,6 +683,13 @@ proc start*(node: WakuNode) {.async.} = proc stop*(node: WakuNode) {.async.} = ## By stopping the switch we are stopping all the underlying mounted protocols + + node.stopProvidersAndListeners() + + if not node.edgeHealthLoop.isNil: + await node.edgeHealthLoop.cancelAndWait() + node.edgeHealthLoop = nil + await node.switch.stop() node.peerManager.stop() @@ -493,9 +716,15 @@ proc stop*(node: WakuNode) {.async.} = not node.wakuPeerExchangeClient.pxLoopHandle.isNil(): await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait() + if not node.wakuKademlia.isNil(): + await node.wakuKademlia.stop() + if not node.wakuRendezvous.isNil(): await node.wakuRendezvous.stopWait() + if not node.wakuRendezvousClient.isNil(): + await node.wakuRendezvousClient.stopWait() + node.started = false proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} = diff --git a/waku/requests/health_requests.nim b/waku/requests/health_requests.nim new file mode 100644 index 000000000..3554922b3 --- 
/dev/null +++ b/waku/requests/health_requests.nim @@ -0,0 +1,39 @@ +import waku/common/broker/request_broker + +import waku/api/types +import waku/node/health_monitor/[protocol_health, topic_health, health_report] +import waku/waku_core/topics +import waku/common/waku_protocol + +export protocol_health, topic_health + +# Get the overall node connectivity status +RequestBroker(sync): + type RequestConnectionStatus* = object + connectionStatus*: ConnectionStatus + +# Get the health status of a set of content topics +RequestBroker(sync): + type RequestContentTopicsHealth* = object + contentTopicHealth*: seq[tuple[topic: ContentTopic, health: TopicHealth]] + + proc signature(topics: seq[ContentTopic]): Result[RequestContentTopicsHealth, string] + +# Get a consolidated node health report +RequestBroker: + type RequestHealthReport* = object + healthReport*: HealthReport + +# Get the health status of a set of shards (pubsub topics) +RequestBroker(sync): + type RequestShardTopicsHealth* = object + topicHealth*: seq[tuple[topic: PubsubTopic, health: TopicHealth]] + + proc signature(topics: seq[PubsubTopic]): Result[RequestShardTopicsHealth, string] + +# Get the health status of a mounted protocol +RequestBroker: + type RequestProtocolHealth* = object + healthStatus*: ProtocolHealth + + proc signature(protocol: WakuProtocol): Future[Result[RequestProtocolHealth, string]] diff --git a/waku/requests/node_requests.nim b/waku/requests/node_requests.nim new file mode 100644 index 000000000..a4ccc6de4 --- /dev/null +++ b/waku/requests/node_requests.nim @@ -0,0 +1,11 @@ +import std/options +import waku/common/broker/[request_broker, multi_request_broker] +import waku/waku_core/[topics] + +RequestBroker(sync): + type RequestRelayShard* = object + relayShard*: RelayShard + + proc signature( + pubsubTopic: Option[PubsubTopic], contentTopic: ContentTopic + ): Result[RequestRelayShard, string] diff --git a/waku/requests/requests.nim b/waku/requests/requests.nim new file mode 100644 
index 000000000..9225c0f3e --- /dev/null +++ b/waku/requests/requests.nim @@ -0,0 +1,3 @@ +import ./[health_requests, rln_requests, node_requests] + +export health_requests, rln_requests, node_requests diff --git a/waku/requests/rln_requests.nim b/waku/requests/rln_requests.nim new file mode 100644 index 000000000..8b61f9fcd --- /dev/null +++ b/waku/requests/rln_requests.nim @@ -0,0 +1,9 @@ +import waku/common/broker/request_broker, waku/waku_core/message/message + +RequestBroker: + type RequestGenerateRlnProof* = object + proof*: seq[byte] + + proc signature( + message: WakuMessage, senderEpoch: float64 + ): Future[Result[RequestGenerateRlnProof, string]] {.async.} diff --git a/waku/rest_api/endpoint/builder.nim b/waku/rest_api/endpoint/builder.nim index bbd8de422..41ab7e06b 100644 --- a/waku/rest_api/endpoint/builder.nim +++ b/waku/rest_api/endpoint/builder.nim @@ -28,7 +28,6 @@ import # It will always be called from main thread anyway. # Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety var restServerNotInstalledTab {.threadvar.}: TableRef[string, string] -restServerNotInstalledTab = newTable[string, string]() export WakuRestServerRef @@ -42,6 +41,9 @@ type RestServerConf* = object proc startRestServerEssentials*( nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16 ): Result[WakuRestServerRef, string] = + if restServerNotInstalledTab.isNil: + restServerNotInstalledTab = newTable[string, string]() + let requestErrorHandler: RestRequestErrorHandler = proc( error: RestRequestError, request: HttpRequestRef ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = diff --git a/waku/rest_api/endpoint/health/types.nim b/waku/rest_api/endpoint/health/types.nim index 57f8b284c..88fa736a8 100644 --- a/waku/rest_api/endpoint/health/types.nim +++ b/waku/rest_api/endpoint/health/types.nim @@ -2,7 +2,8 @@ import results import chronicles, json_serialization, json_serialization/std/options -import ../../../waku_node, ../serdes 
+import ../serdes +import waku/[waku_node, api/types] #### Serialization and deserialization @@ -44,6 +45,7 @@ proc writeValue*( ) {.raises: [IOError].} = writer.beginRecord() writer.writeField("nodeHealth", $value.nodeHealth) + writer.writeField("connectionStatus", $value.connectionStatus) writer.writeField("protocolsHealth", value.protocolsHealth) writer.endRecord() @@ -52,6 +54,7 @@ proc readValue*( ) {.raises: [SerializationError, IOError].} = var nodeHealth: Option[HealthStatus] + connectionStatus: Option[ConnectionStatus] protocolsHealth: Option[seq[ProtocolHealth]] for fieldName in readObjectFields(reader): @@ -66,6 +69,16 @@ proc readValue*( reader.raiseUnexpectedValue("Invalid `health` value: " & $error) nodeHealth = some(health) + of "connectionStatus": + if connectionStatus.isSome(): + reader.raiseUnexpectedField( + "Multiple `connectionStatus` fields found", "HealthReport" + ) + + let state = ConnectionStatus.init(reader.readValue(string)).valueOr: + reader.raiseUnexpectedValue("Invalid `connectionStatus` value: " & $error) + + connectionStatus = some(state) of "protocolsHealth": if protocolsHealth.isSome(): reader.raiseUnexpectedField( @@ -79,5 +92,8 @@ proc readValue*( if nodeHealth.isNone(): reader.raiseUnexpectedValue("Field `nodeHealth` is missing") - value = - HealthReport(nodeHealth: nodeHealth.get, protocolsHealth: protocolsHealth.get(@[])) + value = HealthReport( + nodeHealth: nodeHealth.get, + connectionStatus: connectionStatus.get, + protocolsHealth: protocolsHealth.get(@[]), + ) diff --git a/waku/rest_api/endpoint/store/client.nim b/waku/rest_api/endpoint/store/client.nim index 80939ee25..71ba7610d 100644 --- a/waku/rest_api/endpoint/store/client.nim +++ b/waku/rest_api/endpoint/store/client.nim @@ -57,7 +57,7 @@ proc getStoreMessagesV3*( # Optional cursor fields cursor: string = "", # base64-encoded hash ascending: string = "", - pageSize: string = "", + pageSize: string = "20", # default value is 20 ): RestResponse[StoreQueryResponseHex] 
{. rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet .} diff --git a/waku/rest_api/endpoint/store/handlers.nim b/waku/rest_api/endpoint/store/handlers.nim index 79724b9d7..7d37191fb 100644 --- a/waku/rest_api/endpoint/store/handlers.nim +++ b/waku/rest_api/endpoint/store/handlers.nim @@ -129,6 +129,14 @@ proc createStoreQuery( except CatchableError: return err("page size parsing error: " & getCurrentExceptionMsg()) + # Enforce default value of page_size to 20 + if parsedPagedSize.isNone(): + parsedPagedSize = some(20.uint64) + + # Enforce max value of page_size to 100 + if parsedPagedSize.get() > 100: + parsedPagedSize = some(100.uint64) + return ok( StoreQueryRequest( includeData: parsedIncludeData, diff --git a/waku/utils/requests.nim b/waku/utils/requests.nim index 5e5b9d960..d9afd2887 100644 --- a/waku/utils/requests.nim +++ b/waku/utils/requests.nim @@ -7,4 +7,4 @@ import bearssl/rand, stew/byteutils proc generateRequestId*(rng: ref HmacDrbgContext): string = var bytes: array[10, byte] hmacDrbgGenerate(rng[], bytes) - return toHex(bytes) + return byteutils.toHex(bytes) diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 707c757a3..8eb1fc051 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -61,9 +61,19 @@ proc validate*(msg: WakuMessage): Result[void, string] = upperBound = now + MaxMessageTimestampVariance if msg.timestamp < lowerBound: + warn "rejecting message with old timestamp", + msgTimestamp = msg.timestamp, + lowerBound = lowerBound, + now = now, + drift = (now - msg.timestamp) div 1_000_000_000 return err(invalidMessageOld) if upperBound < msg.timestamp: + warn "rejecting message with future timestamp", + msgTimestamp = msg.timestamp, + upperBound = upperBound, + now = now, + drift = (msg.timestamp - now) div 1_000_000_000 return err(invalidMessageFuture) return ok() diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim 
b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index 842d7cbc2..2f495ba5d 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -5,6 +5,7 @@ import stew/[byteutils, arrayops], results, chronos, + metrics, db_connector/[postgres, db_common], chronicles import @@ -16,6 +17,9 @@ import ./postgres_healthcheck, ./partitions_manager +declarePublicGauge postgres_payload_size_bytes, + "Payload size in bytes of correctly stored messages" + type PostgresDriver* = ref object of ArchiveDriver ## Establish a separate pools for read/write operations writeConnPool: PgAsyncPool @@ -293,13 +297,13 @@ method put*( pubsubTopic: PubsubTopic, message: WakuMessage, ): Future[ArchiveDriverResult[void]] {.async.} = - let messageHash = toHex(messageHash) + let messageHash = byteutils.toHex(messageHash) let contentTopic = message.contentTopic - let payload = toHex(message.payload) + let payload = byteutils.toHex(message.payload) let version = $message.version let timestamp = $message.timestamp - let meta = toHex(message.meta) + let meta = byteutils.toHex(message.meta) trace "put PostgresDriver", messageHash, contentTopic, payload, version, timestamp, meta @@ -333,7 +337,7 @@ method put*( return err("could not put msg in messages table: " & $error) ## Now add the row to messages_lookup - return await s.writeConnPool.runStmt( + let ret = await s.writeConnPool.runStmt( InsertRowInMessagesLookupStmtName, InsertRowInMessagesLookupStmtDefinition, @[messageHash, timestamp], @@ -341,6 +345,10 @@ method put*( @[int32(0), int32(0)], ) + if ret.isOk(): + postgres_payload_size_bytes.set(message.payload.len) + return ret + method getAllMessages*( s: PostgresDriver ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = @@ -431,7 +439,7 @@ proc getMessagesArbitraryQuery( var args: seq[string] if cursor.isSome(): - let hashHex = toHex(cursor.get()) + let hashHex = byteutils.toHex(cursor.get()) let 
timeCursor = ?await s.getTimeCursor(hashHex) @@ -512,7 +520,7 @@ proc getMessageHashesArbitraryQuery( var args: seq[string] if cursor.isSome(): - let hashHex = toHex(cursor.get()) + let hashHex = byteutils.toHex(cursor.get()) let timeCursor = ?await s.getTimeCursor(hashHex) @@ -622,7 +630,7 @@ proc getMessagesPreparedStmt( return ok(rows) - let hashHex = toHex(cursor.get()) + let hashHex = byteutils.toHex(cursor.get()) let timeCursor = ?await s.getTimeCursor(hashHex) @@ -715,7 +723,7 @@ proc getMessageHashesPreparedStmt( return ok(rows) - let hashHex = toHex(cursor.get()) + let hashHex = byteutils.toHex(cursor.get()) let timeCursor = ?await s.getTimeCursor(hashHex) @@ -1339,8 +1347,10 @@ proc removePartition( (await self.performWriteQuery(detachPartitionQuery)).isOkOr: info "detected error when trying to detach partition", error - if ($error).contains("FINALIZE") or - ($error).contains("already pending detach in part"): + if ($error).contains("FINALIZE") or ($error).contains("already pending"): + ## We assume "already pending detach in partitioned table ..." 
as possible error + debug "enforce detach with FINALIZE because of detected error", error + ## We assume the database is suggesting to use FINALIZE when detaching a partition let detachPartitionFinalizeQuery = "ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;" diff --git a/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim index 1a39c1267..a6784e4f8 100644 --- a/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim @@ -213,13 +213,13 @@ method put*( messageHash: WakuMessageHash, receivedTime: Timestamp, ): Future[ArchiveDriverResult[void]] {.async.} = - let digest = toHex(digest.data) - let messageHash = toHex(messageHash) + let digest = byteutils.toHex(digest.data) + let messageHash = byteutils.toHex(messageHash) let contentTopic = message.contentTopic - let payload = toHex(message.payload) + let payload = byteutils.toHex(message.payload) let version = $message.version let timestamp = $message.timestamp - let meta = toHex(message.meta) + let meta = byteutils.toHex(message.meta) trace "put PostgresDriver", timestamp = timestamp @@ -312,7 +312,7 @@ proc getMessagesArbitraryQuery( args.add(pubsubTopic.get()) if cursor.isSome(): - let hashHex = toHex(cursor.get().hash) + let hashHex = byteutils.toHex(cursor.get().hash) var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] proc entreeCallback(pqResult: ptr PGresult) = @@ -463,7 +463,7 @@ proc getMessagesPreparedStmt( let limit = $maxPageSize if cursor.isSome(): - let hash = toHex(cursor.get().hash) + let hash = byteutils.toHex(cursor.get().hash) var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] @@ -576,7 +576,7 @@ proc getMessagesV2PreparedStmt( var stmtDef = if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef - let digest = 
toHex(cursor.get().digest.data) + let digest = byteutils.toHex(cursor.get().digest.data) let timestamp = $cursor.get().storeTime ( diff --git a/waku/waku_core/codecs.nim b/waku/waku_core/codecs.nim index 6dcdfe2f5..0d9394c71 100644 --- a/waku/waku_core/codecs.nim +++ b/waku/waku_core/codecs.nim @@ -10,3 +10,4 @@ const WakuMetadataCodec* = "/vac/waku/metadata/1.0.0" WakuPeerExchangeCodec* = "/vac/waku/peer-exchange/2.0.0-alpha1" WakuLegacyStoreCodec* = "/vac/waku/store/2.0.0-beta4" + WakuRendezVousCodec* = "/vac/waku/rendezvous/1.0.0" diff --git a/waku/waku_core/message/digest.nim b/waku/waku_core/message/digest.nim index 8b99abd7e..3f82ce8f6 100644 --- a/waku/waku_core/message/digest.nim +++ b/waku/waku_core/message/digest.nim @@ -19,6 +19,11 @@ func shortLog*(hash: WakuMessageHash): string = func `$`*(hash: WakuMessageHash): string = shortLog(hash) +func to0xHex*(hash: WakuMessageHash): string = + var hexhash = newStringOfCap(64) + hexhash &= hash.toOpenArray(hash.low, hash.high).to0xHex() + hexhash + const EmptyWakuMessageHash*: WakuMessageHash = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/waku/waku_core/peers.nim b/waku/waku_core/peers.nim index 5591699c6..51a8e1157 100644 --- a/waku/waku_core/peers.nim +++ b/waku/waku_core/peers.nim @@ -9,6 +9,7 @@ import eth/p2p/discoveryv5/enr, eth/net/utils, libp2p/crypto/crypto, + libp2p/crypto/curve25519, libp2p/crypto/secp, libp2p/errors, libp2p/multiaddress, @@ -37,6 +38,7 @@ type Static PeerExchange Dns + Kademlia PeerDirection* = enum UnknownDirection @@ -48,6 +50,8 @@ type RemotePeerInfo* = ref object addrs*: seq[MultiAddress] enr*: Option[enr.Record] protocols*: seq[string] + shards*: seq[uint16] + mixPubKey*: Option[Curve25519Key] agent*: string protoVersion*: string @@ -73,6 +77,7 @@ proc init*( addrs: seq[MultiAddress] = @[], enr: Option[enr.Record] = none(enr.Record), protocols: seq[string] = @[], + shards: seq[uint16] = @[], publicKey: 
crypto.PublicKey = crypto.PublicKey(), agent: string = "", protoVersion: string = "", @@ -82,12 +87,14 @@ proc init*( direction: PeerDirection = UnknownDirection, lastFailedConn: Moment = Moment.init(0, Second), numberFailedConn: int = 0, + mixPubKey: Option[Curve25519Key] = none(Curve25519Key), ): T = RemotePeerInfo( peerId: peerId, addrs: addrs, enr: enr, protocols: protocols, + shards: shards, publicKey: publicKey, agent: agent, protoVersion: protoVersion, @@ -97,6 +104,7 @@ proc init*( direction: direction, lastFailedConn: lastFailedConn, numberFailedConn: numberFailedConn, + mixPubKey: mixPubKey, ) proc init*( @@ -105,9 +113,12 @@ proc init*( addrs: seq[MultiAddress] = @[], enr: Option[enr.Record] = none(enr.Record), protocols: seq[string] = @[], + shards: seq[uint16] = @[], ): T {.raises: [Defect, ResultError[cstring], LPError].} = let peerId = PeerID.init(peerId).tryGet() - RemotePeerInfo(peerId: peerId, addrs: addrs, enr: enr, protocols: protocols) + RemotePeerInfo( + peerId: peerId, addrs: addrs, enr: enr, protocols: protocols, shards: shards + ) ## Parse @@ -326,6 +337,7 @@ converter toRemotePeerInfo*(peerInfo: PeerInfo): RemotePeerInfo = addrs: peerInfo.listenAddrs, enr: none(enr.Record), protocols: peerInfo.protocols, + shards: @[], agent: peerInfo.agentVersion, protoVersion: peerInfo.protoVersion, publicKey: peerInfo.publicKey, @@ -361,6 +373,9 @@ proc getAgent*(peer: RemotePeerInfo): string = return peer.agent proc getShards*(peer: RemotePeerInfo): seq[uint16] = + if peer.shards.len > 0: + return peer.shards + if peer.enr.isNone(): return @[] diff --git a/waku/waku_core/subscription/subscription_manager.nim b/waku/waku_core/subscription/subscription_manager.nim index 1b950b3b4..ccade763b 100644 --- a/waku/waku_core/subscription/subscription_manager.nim +++ b/waku/waku_core/subscription/subscription_manager.nim @@ -5,19 +5,19 @@ import std/tables, results, chronicles, chronos import ./push_handler, ../topics, ../message ## Subscription manager -type 
SubscriptionManager* = object +type LegacySubscriptionManager* = object subscriptions: TableRef[(string, ContentTopic), FilterPushHandler] -proc init*(T: type SubscriptionManager): T = - SubscriptionManager( +proc init*(T: type LegacySubscriptionManager): T = + LegacySubscriptionManager( subscriptions: newTable[(string, ContentTopic), FilterPushHandler]() ) -proc clear*(m: var SubscriptionManager) = +proc clear*(m: var LegacySubscriptionManager) = m.subscriptions.clear() proc registerSubscription*( - m: SubscriptionManager, + m: LegacySubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic, handler: FilterPushHandler, @@ -29,12 +29,12 @@ proc registerSubscription*( error "failed to register filter subscription", error = getCurrentExceptionMsg() proc removeSubscription*( - m: SubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic + m: LegacySubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic ) = m.subscriptions.del((pubsubTopic, contentTopic)) proc notifySubscriptionHandler*( - m: SubscriptionManager, + m: LegacySubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic, message: WakuMessage, @@ -48,5 +48,5 @@ proc notifySubscriptionHandler*( except CatchableError: discard -proc getSubscriptionsCount*(m: SubscriptionManager): int = +proc getSubscriptionsCount*(m: LegacySubscriptionManager): int = m.subscriptions.len() diff --git a/waku/waku_filter_v2/client.nim b/waku/waku_filter_v2/client.nim index c42bca3db..ba8cd3d0c 100644 --- a/waku/waku_filter_v2/client.nim +++ b/waku/waku_filter_v2/client.nim @@ -10,9 +10,8 @@ import bearssl/rand, stew/byteutils import - ../node/peer_manager, - ../node/delivery_monitor/subscriptions_observer, - ../waku_core, + waku/ + [node/peer_manager, waku_core, events/delivery_events, common/broker/broker_context], ./common, ./protocol_metrics, ./rpc_codec, @@ -22,18 +21,15 @@ logScope: topics = "waku filter client" type WakuFilterClient* = ref object of LPProtocol 
+ brokerCtx: BrokerContext rng: ref HmacDrbgContext peerManager: PeerManager pushHandlers: seq[FilterPushHandler] - subscrObservers: seq[SubscriptionObserver] func generateRequestId(rng: ref HmacDrbgContext): string = var bytes: array[10, byte] hmacDrbgGenerate(rng[], bytes) - return toHex(bytes) - -proc addSubscrObserver*(wfc: WakuFilterClient, obs: SubscriptionObserver) = - wfc.subscrObservers.add(obs) + return byteutils.toHex(bytes) proc sendSubscribeRequest( wfc: WakuFilterClient, @@ -132,8 +128,7 @@ proc subscribe*( ?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) - for obs in wfc.subscrObservers: - obs.onSubscribe(pubSubTopic, contentTopicSeq) + OnFilterSubscribeEvent.emit(wfc.brokerCtx, pubsubTopic, contentTopicSeq) return ok() @@ -156,8 +151,7 @@ proc unsubscribe*( ?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) - for obs in wfc.subscrObservers: - obs.onUnsubscribe(pubSubTopic, contentTopicSeq) + OnFilterUnSubscribeEvent.emit(wfc.brokerCtx, pubsubTopic, contentTopicSeq) return ok() @@ -210,6 +204,9 @@ proc initProtocolHandler(wfc: WakuFilterClient) = proc new*( T: type WakuFilterClient, peerManager: PeerManager, rng: ref HmacDrbgContext ): T = - let wfc = WakuFilterClient(rng: rng, peerManager: peerManager, pushHandlers: @[]) + let brokerCtx = globalBrokerContext() + let wfc = WakuFilterClient( + brokerCtx: brokerCtx, rng: rng, peerManager: peerManager, pushHandlers: @[] + ) wfc.initProtocolHandler() wfc diff --git a/waku/waku_lightpush/callbacks.nim b/waku/waku_lightpush/callbacks.nim index bde4e3e26..ac2e562b6 100644 --- a/waku/waku_lightpush/callbacks.nim +++ b/waku/waku_lightpush/callbacks.nim @@ -31,7 +31,7 @@ proc checkAndGenerateRLNProof*( proc getNilPushHandler*(): PushMessageHandler = return proc( - peer: PeerId, pubsubTopic: string, message: WakuMessage + pubsubTopic: string, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = return lightpushResultInternalError("no waku relay found") @@ -39,7 
+39,7 @@ proc getRelayPushHandler*( wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() ): PushMessageHandler = return proc( - peer: PeerId, pubsubTopic: string, message: WakuMessage + pubsubTopic: string, message: WakuMessage ): Future[WakuLightPushResult] {.async.} = # append RLN proof let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr: diff --git a/waku/waku_lightpush/client.nim b/waku/waku_lightpush/client.nim index 4d0c49a84..fd12c49d2 100644 --- a/waku/waku_lightpush/client.nim +++ b/waku/waku_lightpush/client.nim @@ -5,7 +5,6 @@ import libp2p/peerid, libp2p/stream/connection import ../waku_core/peers, ../node/peer_manager, - ../node/delivery_monitor/publish_observer, ../utils/requests, ../waku_core, ./common, @@ -17,17 +16,24 @@ logScope: topics = "waku lightpush client" type WakuLightPushClient* = ref object - peerManager*: PeerManager rng*: ref rand.HmacDrbgContext - publishObservers: seq[PublishObserver] + peerManager*: PeerManager proc new*( T: type WakuLightPushClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext ): T = WakuLightPushClient(peerManager: peerManager, rng: rng) -proc addPublishObserver*(wl: WakuLightPushClient, obs: PublishObserver) = - wl.publishObservers.add(obs) +proc ensureTimestampSet(message: var WakuMessage) = + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + +## Short log string for peer identifiers (overloads for convenience) +func shortPeerId(peer: PeerId): string = + shortLog(peer) + +func shortPeerId(peer: RemotePeerInfo): string = + shortLog(peer.peerId) proc sendPushRequest( wl: WakuLightPushClient, @@ -74,64 +80,47 @@ proc publish*( wl: WakuLightPushClient, pubSubTopic: Option[PubsubTopic] = none(PubsubTopic), wakuMessage: WakuMessage, - peer: PeerId | RemotePeerInfo, + dest: Connection | PeerId | RemotePeerInfo, ): Future[WakuLightPushResult] {.async, gcsafe.} = var message = wakuMessage - if message.timestamp == 0: - message.timestamp = 
getNowInNanosecondTime() + ensureTimestampSet(message) - when peer is PeerId: - info "publish", - peerId = shortLog(peer), - msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex - else: - info "publish", - peerId = shortLog(peer.peerId), - msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex + let msgHash = computeMessageHash(pubSubTopic.get(""), message).to0xHex() - let pushRequest = LightpushRequest( - requestId: generateRequestId(wl.rng), pubSubTopic: pubSubTopic, message: message + let peerIdStr = + when dest is Connection: + shortPeerId(dest.peerId) + else: + shortPeerId(dest) + + info "publish", + myPeerId = wl.peerManager.switch.peerInfo.peerId, + peerId = peerIdStr, + msgHash = msgHash, + sentTime = getNowInNanosecondTime() + + let request = LightpushRequest( + requestId: generateRequestId(wl.rng), pubsubTopic: pubSubTopic, message: message ) - let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) - for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic.get(""), message) + let relayPeerCount = + when dest is Connection: + ?await wl.sendPushRequest(request, dest.peerId, some(dest)) + else: + ?await wl.sendPushRequest(request, dest) - return lightpushSuccessResult(publishedCount) + return lightpushSuccessResult(relayPeerCount) proc publishToAny*( - wl: WakuLightPushClient, pubSubTopic: PubsubTopic, wakuMessage: WakuMessage + wl: WakuLightPushClient, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage ): Future[WakuLightPushResult] {.async, gcsafe.} = - ## This proc is similar to the publish one but in this case - ## we don't specify a particular peer and instead we get it from peer manager - - var message = wakuMessage - if message.timestamp == 0: - message.timestamp = getNowInNanosecondTime() - + # Like publish, but selects a peer automatically from the peer manager let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr: # TODO: check if it is matches the situation - shall we distinguish client 
side missing peers from server side? return lighpushErrorResult( LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers" ) - - info "publishToAny", - my_peer_id = wl.peerManager.switch.peerInfo.peerId, - peer_id = peer.peerId, - msg_hash = computeMessageHash(pubsubTopic, message).to0xHex, - sentTime = getNowInNanosecondTime() - - let pushRequest = LightpushRequest( - requestId: generateRequestId(wl.rng), - pubSubTopic: some(pubSubTopic), - message: message, - ) - let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) - - for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic, message) - - return lightpushSuccessResult(publishedCount) + return await wl.publish(some(pubsubTopic), wakuMessage, peer) proc publishWithConn*( wl: WakuLightPushClient, @@ -140,22 +129,4 @@ proc publishWithConn*( conn: Connection, destPeer: PeerId, ): Future[WakuLightPushResult] {.async, gcsafe.} = - info "publishWithConn", - my_peer_id = wl.peerManager.switch.peerInfo.peerId, - peer_id = destPeer, - msg_hash = computeMessageHash(pubsubTopic, message).to0xHex, - sentTime = getNowInNanosecondTime() - - let pushRequest = LightpushRequest( - requestId: generateRequestId(wl.rng), - pubSubTopic: some(pubSubTopic), - message: message, - ) - #TODO: figure out how to not pass destPeer as this is just a hack - let publishedCount = - ?await wl.sendPushRequest(pushRequest, destPeer, conn = some(conn)) - - for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic, message) - - return lightpushSuccessResult(publishedCount) + return await wl.publish(some(pubSubTopic), message, conn) diff --git a/waku/waku_lightpush/common.nim b/waku/waku_lightpush/common.nim index f2687834e..f0762e2d2 100644 --- a/waku/waku_lightpush/common.nim +++ b/waku/waku_lightpush/common.nim @@ -25,7 +25,7 @@ type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]] type WakuLightPushResult* = Result[uint32, ErrorStatus] type PushMessageHandler* = proc( - peer: PeerId, 
pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult] {.async.} const TooManyRequestsMessage* = "Request rejected due to too many requests" @@ -35,7 +35,15 @@ func isSuccess*(response: LightPushResponse): bool = func toPushResult*(response: LightPushResponse): WakuLightPushResult = if isSuccess(response): - return ok(response.relayPeerCount.get(0)) + let relayPeerCount = response.relayPeerCount.get(0) + return ( + if (relayPeerCount == 0): + # Consider publishing to zero peers an error even if the service node + # sent us a "successful" response with zero peers + err((LightPushErrorCode.NO_PEERS_TO_RELAY, response.statusDesc)) + else: + ok(relayPeerCount) + ) else: return err((response.statusCode, response.statusDesc)) @@ -51,11 +59,6 @@ func lightpushResultBadRequest*(msg: string): WakuLightPushResult = func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult = return err((LightPushErrorCode.SERVICE_NOT_AVAILABLE, some(msg))) -func lighpushErrorResult*( - statusCode: LightpushStatusCode, desc: Option[string] -): WakuLightPushResult = - return err((statusCode, desc)) - func lighpushErrorResult*( statusCode: LightpushStatusCode, desc: string ): WakuLightPushResult = diff --git a/waku/waku_lightpush/protocol.nim b/waku/waku_lightpush/protocol.nim index 2e8c9c2f1..ecbff8461 100644 --- a/waku/waku_lightpush/protocol.nim +++ b/waku/waku_lightpush/protocol.nim @@ -71,16 +71,16 @@ proc handleRequest( msg_hash = msg_hash, receivedTime = getNowInNanosecondTime() - let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr: + let res = (await wl.pushHandler(pubsubTopic, pushRequest.message)).valueOr: return err((code: error.code, desc: error.desc)) return ok(res) proc handleRequest*( wl: WakuLightPush, peerId: PeerId, buffer: seq[byte] ): Future[LightPushResponse] {.async.} = - let pushRequest = LightPushRequest.decode(buffer).valueOr: + let request = 
LightPushRequest.decode(buffer).valueOr: let desc = decodeRpcFailure & ": " & $error - error "failed to push message", error = desc + error "failed to decode Lightpush request", error = desc let errorCode = LightPushErrorCode.BAD_REQUEST waku_lightpush_v3_errors.inc(labelValues = [$errorCode]) return LightPushResponse( @@ -89,16 +89,16 @@ proc handleRequest*( statusDesc: some(desc), ) - let relayPeerCount = (await handleRequest(wl, peerId, pushRequest)).valueOr: + let relayPeerCount = (await wl.handleRequest(peerId, request)).valueOr: let desc = error.desc waku_lightpush_v3_errors.inc(labelValues = [$error.code]) error "failed to push message", error = desc return LightPushResponse( - requestId: pushRequest.requestId, statusCode: error.code, statusDesc: desc + requestId: request.requestId, statusCode: error.code, statusDesc: desc ) return LightPushResponse( - requestId: pushRequest.requestId, + requestId: request.requestId, statusCode: LightPushSuccessCode.SUCCESS, statusDesc: none[string](), relayPeerCount: some(relayPeerCount), @@ -123,7 +123,7 @@ proc initProtocolHandler(wl: WakuLightPush) = ) try: - rpc = await handleRequest(wl, conn.peerId, buffer) + rpc = await wl.handleRequest(conn.peerId, buffer) except CatchableError: error "lightpush failed handleRequest", error = getCurrentExceptionMsg() do: diff --git a/waku/waku_lightpush_legacy/callbacks.nim b/waku/waku_lightpush_legacy/callbacks.nim index 1fe4cf302..a5b88b5b8 100644 --- a/waku/waku_lightpush_legacy/callbacks.nim +++ b/waku/waku_lightpush_legacy/callbacks.nim @@ -30,7 +30,7 @@ proc checkAndGenerateRLNProof*( proc getNilPushHandler*(): PushMessageHandler = return proc( - peer: PeerId, pubsubTopic: string, message: WakuMessage + pubsubTopic: string, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = return err("no waku relay found") @@ -38,7 +38,7 @@ proc getRelayPushHandler*( wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() ): PushMessageHandler = return 
proc( - peer: PeerId, pubsubTopic: string, message: WakuMessage + pubsubTopic: string, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} = # append RLN proof let msgWithProof = ?checkAndGenerateRLNProof(rlnPeer, message) diff --git a/waku/waku_lightpush_legacy/client.nim b/waku/waku_lightpush_legacy/client.nim index 0e3c9bd6f..ab489bec9 100644 --- a/waku/waku_lightpush_legacy/client.nim +++ b/waku/waku_lightpush_legacy/client.nim @@ -5,7 +5,6 @@ import libp2p/peerid import ../waku_core/peers, ../node/peer_manager, - ../node/delivery_monitor/publish_observer, ../utils/requests, ../waku_core, ./common, @@ -19,7 +18,6 @@ logScope: type WakuLegacyLightPushClient* = ref object peerManager*: PeerManager rng*: ref rand.HmacDrbgContext - publishObservers: seq[PublishObserver] proc new*( T: type WakuLegacyLightPushClient, @@ -28,9 +26,6 @@ proc new*( ): T = WakuLegacyLightPushClient(peerManager: peerManager, rng: rng) -proc addPublishObserver*(wl: WakuLegacyLightPushClient, obs: PublishObserver) = - wl.publishObservers.add(obs) - proc sendPushRequest( wl: WakuLegacyLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo ): Future[WakuLightPushResult[void]] {.async, gcsafe.} = @@ -86,9 +81,6 @@ proc publish*( let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) ?await wl.sendPushRequest(pushRequest, peer) - for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic, message) - notice "publishing message with lightpush", pubsubTopic = pubsubTopic, contentTopic = message.contentTopic, @@ -111,7 +103,4 @@ proc publishToAny*( let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) ?await wl.sendPushRequest(pushRequest, peer) - for obs in wl.publishObservers: - obs.onMessagePublished(pubSubTopic, message) - return ok() diff --git a/waku/waku_lightpush_legacy/common.nim b/waku/waku_lightpush_legacy/common.nim index fcdf1814c..1b40ba72b 100644 --- a/waku/waku_lightpush_legacy/common.nim +++ 
b/waku/waku_lightpush_legacy/common.nim @@ -9,7 +9,7 @@ export WakuLegacyLightPushCodec type WakuLightPushResult*[T] = Result[T, string] type PushMessageHandler* = proc( - peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + pubsubTopic: PubsubTopic, message: WakuMessage ): Future[WakuLightPushResult[void]] {.async.} const TooManyRequestsMessage* = "TOO_MANY_REQUESTS" diff --git a/waku/waku_lightpush_legacy/protocol.nim b/waku/waku_lightpush_legacy/protocol.nim index d51943cff..72fc963ee 100644 --- a/waku/waku_lightpush_legacy/protocol.nim +++ b/waku/waku_lightpush_legacy/protocol.nim @@ -53,7 +53,7 @@ proc handleRequest*( msg_hash = msg_hash, receivedTime = getNowInNanosecondTime() - let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) + let handleRes = await wl.pushHandler(pubsubTopic, message) isSuccess = handleRes.isOk() pushResponseInfo = (if isSuccess: "OK" else: handleRes.error) diff --git a/waku/waku_mix/protocol.nim b/waku/waku_mix/protocol.nim index 34b50f8a9..2c972bef6 100644 --- a/waku/waku_mix/protocol.nim +++ b/waku/waku_mix/protocol.nim @@ -1,31 +1,33 @@ {.push raises: [].} -import chronicles, std/[options, tables, sequtils], chronos, results, metrics, strutils +import chronicles, std/options, chronos, results, metrics import libp2p/crypto/curve25519, + libp2p/crypto/crypto, libp2p/protocols/mix, libp2p/protocols/mix/mix_node, - libp2p/[multiaddress, multicodec, peerid], + libp2p/protocols/mix/mix_protocol, + libp2p/protocols/mix/mix_metrics, + libp2p/protocols/mix/delay_strategy, + libp2p/[multiaddress, peerid], eth/common/keys import - ../node/peer_manager, - ../waku_core, - ../waku_enr, - ../node/peer_manager/waku_peer_store, - ../common/nimchronos + waku/node/peer_manager, + waku/waku_core, + waku/waku_enr, + waku/node/peer_manager/waku_peer_store logScope: topics = "waku mix" -const mixMixPoolSize = 3 +const minMixPoolSize = 4 type WakuMix* = ref object of MixProtocol peerManager*: PeerManager clusterId: uint16 - 
nodePoolLoopHandle: Future[void] pubKey*: Curve25519Key WakuMixResult*[T] = Result[T, string] @@ -34,91 +36,10 @@ type multiAddr*: string pubKey*: Curve25519Key -proc mixPoolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool = - # Note that origin based(discv5) filtering is not done intentionally - # so that more mix nodes can be discovered. - if peer.enr.isNone(): - trace "peer has no ENR", peer = $peer - return false - - if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()): - trace "peer has mismatching cluster", peer = $peer - return false - - # Filter if mix is enabled - if not peer.enr.get().supportsCapability(Capabilities.Mix): - trace "peer doesn't support mix", peer = $peer - return false - - return true - -proc appendPeerIdToMultiaddr*(multiaddr: MultiAddress, peerId: PeerId): MultiAddress = - if multiaddr.contains(multiCodec("p2p")).get(): - return multiaddr - - var maddrStr = multiaddr.toString().valueOr: - error "Failed to convert multiaddress to string.", err = error - return multiaddr - maddrStr.add("/p2p/" & $peerId) - var cleanAddr = MultiAddress.init(maddrStr).valueOr: - error "Failed to convert string to multiaddress.", err = error - return multiaddr - return cleanAddr - -func getIPv4Multiaddr*(maddrs: seq[MultiAddress]): Option[MultiAddress] = - for multiaddr in maddrs: - trace "checking multiaddr", addr = $multiaddr - if multiaddr.contains(multiCodec("ip4")).get(): - trace "found ipv4 multiaddr", addr = $multiaddr - return some(multiaddr) - trace "no ipv4 multiaddr found" - return none(MultiAddress) - -#[ Not deleting as these can be reused once discovery is sorted - proc populateMixNodePool*(mix: WakuMix) = - # populate only peers that i) are reachable ii) share cluster iii) support mix - let remotePeers = mix.peerManager.switch.peerStore.peers().filterIt( - mixPoolFilter(some(mix.clusterId), it) - ) - var mixNodes = initTable[PeerId, MixPubInfo]() - - for i in 0 ..< min(remotePeers.len, 100): - let remotePeerENR 
= remotePeers[i].enr.get() - let ipv4addr = getIPv4Multiaddr(remotePeers[i].addrs).valueOr: - trace "peer has no ipv4 address", peer = $remotePeers[i] - continue - let maddrWithPeerId = - toString(appendPeerIdToMultiaddr(ipv4addr, remotePeers[i].peerId)) - trace "remote peer ENR", - peerId = remotePeers[i].peerId, enr = remotePeerENR, maddr = maddrWithPeerId - - let peerMixPubKey = mixKey(remotePeerENR).get() - let mixNodePubInfo = - createMixPubInfo(maddrWithPeerId.value, intoCurve25519Key(peerMixPubKey)) - mixNodes[remotePeers[i].peerId] = mixNodePubInfo - - mix_pool_size.set(len(mixNodes)) - # set the mix node pool - mix.setNodePool(mixNodes) - trace "mix node pool updated", poolSize = mix.getNodePoolSize() - -proc startMixNodePoolMgr*(mix: WakuMix) {.async.} = - info "starting mix node pool manager" - # try more aggressively to populate the pool at startup - var attempts = 50 - # TODO: make initial pool size configurable - while mix.getNodePoolSize() < 100 and attempts > 0: - attempts -= 1 - mix.populateMixNodePool() - await sleepAsync(1.seconds) - - # TODO: make interval configurable - heartbeat "Updating mix node pool", 5.seconds: - mix.populateMixNodePool() - ]# - -proc toMixNodeTable(bootnodes: seq[MixNodePubInfo]): Table[PeerId, MixPubInfo] = - var mixNodes = initTable[PeerId, MixPubInfo]() +proc processBootNodes( + bootnodes: seq[MixNodePubInfo], peermgr: PeerManager, mix: WakuMix +) = + var count = 0 for node in bootnodes: let pInfo = parsePeerInfo(node.multiAddr).valueOr: error "Failed to get peer id from multiaddress: ", @@ -139,9 +60,15 @@ proc toMixNodeTable(bootnodes: seq[MixNodePubInfo]): Table[PeerId, MixPubInfo] = error "Failed to parse multiaddress", multiAddr = node.multiAddr, error = error continue - mixNodes[peerId] = MixPubInfo.init(peerId, multiAddr, node.pubKey, peerPubKey.skkey) - info "using mix bootstrap nodes ", bootNodes = mixNodes - return mixNodes + let mixPubInfo = MixPubInfo.init(peerId, multiAddr, node.pubKey, peerPubKey.skkey) + 
mix.nodePool.add(mixPubInfo) + count.inc() + + peermgr.addPeer( + RemotePeerInfo.init(peerId, @[multiAddr], mixPubKey = some(node.pubKey)) + ) + mix_pool_size.set(count) + info "using mix bootstrap nodes ", count = count proc new*( T: type WakuMix, @@ -152,30 +79,37 @@ proc new*( bootnodes: seq[MixNodePubInfo], ): WakuMixResult[T] = let mixPubKey = public(mixPrivKey) - info "mixPrivKey", mixPrivKey = mixPrivKey, mixPubKey = mixPubKey + info "mixPubKey", mixPubKey = mixPubKey let nodeMultiAddr = MultiAddress.init(nodeAddr).valueOr: return err("failed to parse mix node address: " & $nodeAddr & ", error: " & error) let localMixNodeInfo = initMixNodeInfo( peermgr.switch.peerInfo.peerId, nodeMultiAddr, mixPubKey, mixPrivKey, peermgr.switch.peerInfo.publicKey.skkey, peermgr.switch.peerInfo.privateKey.skkey, ) - if bootnodes.len < mixMixPoolSize: - warn "publishing with mix won't work as there are less than 3 mix nodes in node pool" - let initTable = toMixNodeTable(bootnodes) - if len(initTable) < mixMixPoolSize: - warn "publishing with mix won't work as there are less than 3 mix nodes in node pool" + if bootnodes.len < minMixPoolSize: + warn "publishing with mix won't work until atleast 3 mix nodes in node pool" + var m = WakuMix(peerManager: peermgr, clusterId: clusterId, pubKey: mixPubKey) - procCall MixProtocol(m).init(localMixNodeInfo, initTable, peermgr.switch) + procCall MixProtocol(m).init( + localMixNodeInfo, + peermgr.switch, + delayStrategy = + ExponentialDelayStrategy.new(meanDelayMs = 50, rng = crypto.newRng()), + ) + + processBootNodes(bootnodes, peermgr, m) + + if m.nodePool.len < minMixPoolSize: + warn "publishing with mix won't work until atleast 3 mix nodes in node pool" return ok(m) +proc poolSize*(mix: WakuMix): int = + mix.nodePool.len + method start*(mix: WakuMix) = info "starting waku mix protocol" - #mix.nodePoolLoopHandle = mix.startMixNodePoolMgr() This can be re-enabled once discovery is addressed method stop*(mix: WakuMix) {.async.} = - if 
mix.nodePoolLoopHandle.isNil(): - return - await mix.nodePoolLoopHandle.cancelAndWait() - mix.nodePoolLoopHandle = nil + discard # Mix Protocol diff --git a/waku/waku_relay.nim b/waku/waku_relay.nim index 96328d984..a91033cf1 100644 --- a/waku/waku_relay.nim +++ b/waku/waku_relay.nim @@ -1,3 +1,4 @@ -import ./waku_relay/[protocol, topic_health] +import ./waku_relay/protocol +import waku/node/health_monitor/topic_health export protocol, topic_health diff --git a/waku/waku_relay/protocol.nim b/waku/waku_relay/protocol.nim index cbf9123dd..17470af29 100644 --- a/waku/waku_relay/protocol.nim +++ b/waku/waku_relay/protocol.nim @@ -5,7 +5,7 @@ {.push raises: [].} import - std/[strformat, strutils], + std/[strformat, strutils, sets], stew/byteutils, results, sequtils, @@ -17,10 +17,17 @@ import libp2p/protocols/pubsub/rpc/messages, libp2p/stream/connection, libp2p/switch -import - ../waku_core, ./message_id, ./topic_health, ../node/delivery_monitor/publish_observer -from ../waku_core/codecs import WakuRelayCodec +import + waku/waku_core, + waku/node/health_monitor/topic_health, + waku/requests/health_requests, + waku/events/health_events, + ./message_id, + waku/common/broker/broker_context, + waku/events/peer_events + +from waku/waku_core/codecs import WakuRelayCodec export WakuRelayCodec type ShardMetrics = object @@ -149,6 +156,8 @@ type pubsubTopic: PubsubTopic, message: WakuMessage ): Future[ValidationResult] {.gcsafe, raises: [Defect].} WakuRelay* = ref object of GossipSub + brokerCtx: BrokerContext + peerEventListener: EventWakuPeerListener # seq of tuples: the first entry in the tuple contains the validators are called for every topic # the second entry contains the error messages to be returned when the validator fails wakuValidators: seq[tuple[handler: WakuValidatorHandler, errorMessage: string]] @@ -157,10 +166,14 @@ type # map topic with its assigned validator within pubsub topicHandlers: Table[PubsubTopic, TopicHandler] # map topic with the TopicHandler proc 
in charge of attending topic's incoming message events - publishObservers: seq[PublishObserver] topicsHealth*: Table[string, TopicHealth] onTopicHealthChange*: TopicHealthChangeHandler topicHealthLoopHandle*: Future[void] + topicHealthUpdateEvent: AsyncEvent + topicHealthDirty: HashSet[string] + # list of topics that need their health updated in the update event + topicHealthCheckAll: bool + # true if all topics need to have their health status refreshed in the update event msgMetricsPerShard*: Table[string, ShardMetrics] # predefinition for more detailed results from publishing new message @@ -283,6 +296,21 @@ proc initRelayObservers(w: WakuRelay) = ) proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) = + if msgs.control.isSome(): + let ctrl = msgs.control.get() + var topicsChanged = false + + for graft in ctrl.graft: + w.topicHealthDirty.incl(graft.topicID) + topicsChanged = true + + for prune in ctrl.prune: + w.topicHealthDirty.incl(prune.topicID) + topicsChanged = true + + if topicsChanged: + w.topicHealthUpdateEvent.fire() + for msg in msgs.messages: let (msg_id_short, topic, wakuMessage, msgSize) = decodeRpcMessageInfo(peer, msg).valueOr: continue @@ -338,11 +366,25 @@ proc new*( maxMessageSize = maxMessageSize, parameters = GossipsubParameters, ) + w.brokerCtx = globalBrokerContext() procCall GossipSub(w).initPubSub() + w.topicsHealth = initTable[string, TopicHealth]() + w.topicHealthUpdateEvent = newAsyncEvent() + w.topicHealthDirty = initHashSet[string]() + w.topicHealthCheckAll = false w.initProtocolHandler() w.initRelayObservers() - w.topicsHealth = initTable[string, TopicHealth]() + + w.peerEventListener = EventWakuPeer.listen( + w.brokerCtx, + proc(evt: EventWakuPeer): Future[void] {.async: (raises: []), gcsafe.} = + if evt.kind == WakuPeerEventKind.EventDisconnected: + w.topicHealthCheckAll = true + w.topicHealthUpdateEvent.fire() + , + ).valueOr: + return err("Failed to subscribe to peer events: " & error) except InitializationError: return 
err("initialization error: " & getCurrentExceptionMsg()) @@ -353,12 +395,6 @@ proc addValidator*( ) {.gcsafe.} = w.wakuValidators.add((handler, errorMessage)) -proc addPublishObserver*(w: WakuRelay, obs: PublishObserver) = - ## Observer when the api client performed a publish operation. This - ## is initially aimed for bringing an additional layer of delivery reliability thanks - ## to store - w.publishObservers.add(obs) - proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} = ## Observes when a message is sent/received from the GossipSub PoV procCall GossipSub(w).addObserver(observer) @@ -426,38 +462,58 @@ proc calculateTopicHealth(wakuRelay: WakuRelay, topic: string): TopicHealth = return TopicHealth.MINIMALLY_HEALTHY return TopicHealth.SUFFICIENTLY_HEALTHY -proc updateTopicsHealth(wakuRelay: WakuRelay) {.async.} = - var futs = newSeq[Future[void]]() - for topic in toSeq(wakuRelay.topics.keys): - ## loop over all the topics I'm subscribed to - let - oldHealth = wakuRelay.topicsHealth.getOrDefault(topic) - currentHealth = wakuRelay.calculateTopicHealth(topic) +proc isSubscribed*(w: WakuRelay, topic: PubsubTopic): bool = + GossipSub(w).topics.hasKey(topic) - if oldHealth == currentHealth: - continue +proc subscribedTopics*(w: WakuRelay): seq[PubsubTopic] = + return toSeq(GossipSub(w).topics.keys()) - wakuRelay.topicsHealth[topic] = currentHealth - if not wakuRelay.onTopicHealthChange.isNil(): - let fut = wakuRelay.onTopicHealthChange(topic, currentHealth) - if not fut.completed(): # Fast path for successful sync handlers - futs.add(fut) +proc topicsHealthLoop(w: WakuRelay) {.async.} = + while true: + await w.topicHealthUpdateEvent.wait() + w.topicHealthUpdateEvent.clear() + + var topicsToCheck: seq[string] + + if w.topicHealthCheckAll: + topicsToCheck = toSeq(w.topics.keys) + else: + topicsToCheck = toSeq(w.topicHealthDirty) + + w.topicHealthCheckAll = false + w.topicHealthDirty.clear() + + var futs = newSeq[Future[void]]() + + for topic in 
topicsToCheck: + # guard against topic being unsubscribed since fire() + if not w.isSubscribed(topic): + continue + + let + oldHealth = w.topicsHealth.getOrDefault(topic, TopicHealth.UNHEALTHY) + currentHealth = w.calculateTopicHealth(topic) + + if oldHealth == currentHealth: + continue + + w.topicsHealth[topic] = currentHealth + + EventShardTopicHealthChange.emit(w.brokerCtx, topic, currentHealth) + + if not w.onTopicHealthChange.isNil(): + futs.add(w.onTopicHealthChange(topic, currentHealth)) if futs.len() > 0: - # slow path - we have to wait for the handlers to complete try: - futs = await allFinished(futs) + discard await allFinished(futs) except CancelledError: - # check for errors in futures - for fut in futs: - if fut.failed: - let err = fut.readError() - warn "Error in health change handler", description = err.msg + break + except CatchableError as e: + warn "Error in topic health callback", error = e.msg -proc topicsHealthLoop(wakuRelay: WakuRelay) {.async.} = - while true: - await wakuRelay.updateTopicsHealth() - await sleepAsync(10.seconds) + # safety cooldown to protect from edge cases + await sleepAsync(100.milliseconds) method start*(w: WakuRelay) {.async, base.} = info "start" @@ -467,15 +523,13 @@ method start*(w: WakuRelay) {.async, base.} = method stop*(w: WakuRelay) {.async, base.} = info "stop" await procCall GossipSub(w).stop() + + if w.peerEventListener.id != 0: + EventWakuPeer.dropListener(w.brokerCtx, w.peerEventListener) + if not w.topicHealthLoopHandle.isNil(): await w.topicHealthLoopHandle.cancelAndWait() -proc isSubscribed*(w: WakuRelay, topic: PubsubTopic): bool = - GossipSub(w).topics.hasKey(topic) - -proc subscribedTopics*(w: WakuRelay): seq[PubsubTopic] = - return toSeq(GossipSub(w).topics.keys()) - proc generateOrderedValidator(w: WakuRelay): ValidatorHandler {.gcsafe.} = # rejects messages that are not WakuMessage let wrappedValidator = proc( @@ -573,6 +627,8 @@ proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: 
WakuRelayHandle procCall GossipSub(w).subscribe(pubsubTopic, topicHandler) w.topicHandlers[pubsubTopic] = topicHandler + w.topicHealthDirty.incl(pubsubTopic) + w.topicHealthUpdateEvent.fire() proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = ## Unsubscribe all handlers on this pubsub topic @@ -582,6 +638,8 @@ proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = procCall GossipSub(w).unsubscribeAll(pubsubTopic) w.topicValidator.del(pubsubTopic) w.topicHandlers.del(pubsubTopic) + w.topicsHealth.del(pubsubTopic) + w.topicHealthDirty.excl(pubsubTopic) proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) = if not w.topicValidator.hasKey(pubsubTopic): @@ -607,6 +665,8 @@ proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) = w.topicValidator.del(pubsubTopic) w.topicHandlers.del(pubsubTopic) + w.topicsHealth.del(pubsubTopic) + w.topicHealthDirty.excl(pubsubTopic) proc publish*( w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage @@ -628,9 +688,6 @@ proc publish*( if relayedPeerCount <= 0: return err(NoPeersToPublish) - for obs in w.publishObservers: - obs.onMessagePublished(pubSubTopic, message) - return ok(relayedPeerCount) proc getConnectedPubSubPeers*( diff --git a/waku/waku_rendezvous/client.nim b/waku/waku_rendezvous/client.nim new file mode 100644 index 000000000..09e789774 --- /dev/null +++ b/waku/waku_rendezvous/client.nim @@ -0,0 +1,142 @@ +{.push raises: [].} + +import + std/[options, sequtils, tables], + results, + chronos, + chronicles, + libp2p/protocols/rendezvous, + libp2p/crypto/curve25519, + libp2p/switch, + libp2p/utils/semaphore + +import metrics except collect + +import + waku/node/peer_manager, + waku/waku_core/peers, + waku/waku_core/codecs, + ./common, + ./waku_peer_record + +logScope: + topics = "waku rendezvous client" + +declarePublicCounter rendezvousPeerFoundTotal, + "total number of peers found via rendezvous" + +type WakuRendezVousClient* = ref object + switch: Switch + peerManager: PeerManager + 
clusterId: uint16 + requestInterval: timer.Duration + periodicRequestFut: Future[void] + # Internal rendezvous instance for making requests + rdv: GenericRendezVous[WakuPeerRecord] + +const MaxSimultanesousAdvertisements = 5 +const RendezVousLookupInterval = 10.seconds + +proc requestAll*( + self: WakuRendezVousClient +): Future[Result[void, string]] {.async: (raises: []).} = + trace "waku rendezvous client requests started" + + let namespace = computeMixNamespace(self.clusterId) + + # Get a random WakuRDV peer + let rpi = self.peerManager.selectPeer(WakuRendezVousCodec).valueOr: + return err("could not get a peer supporting WakuRendezVousCodec") + + var records: seq[WakuPeerRecord] + try: + # Use the libp2p rendezvous request method + records = await self.rdv.request( + Opt.some(namespace), Opt.some(PeersRequestedCount), Opt.some(@[rpi.peerId]) + ) + except CatchableError as e: + return err("rendezvous request failed: " & e.msg) + + trace "waku rendezvous client request got peers", count = records.len + for record in records: + if not self.switch.peerStore.peerExists(record.peerId): + rendezvousPeerFoundTotal.inc() + if record.mixKey.len == 0 or record.peerId == self.switch.peerInfo.peerId: + continue + trace "adding peer from rendezvous", + peerId = record.peerId, addresses = $record.addresses, mixKey = record.mixKey + let rInfo = RemotePeerInfo.init( + record.peerId, + record.addresses, + mixPubKey = some(intoCurve25519Key(fromHex(record.mixKey))), + ) + self.peerManager.addPeer(rInfo) + + trace "waku rendezvous client request finished" + + return ok() + +proc periodicRequests(self: WakuRendezVousClient) {.async.} = + info "waku rendezvous periodic requests started", interval = self.requestInterval + + # infinite loop + while true: + await sleepAsync(self.requestInterval) + + (await self.requestAll()).isOkOr: + error "waku rendezvous requests failed", error = error + + # Exponential backoff + +#[ TODO: Reevaluate for mix, maybe be aggresive in the start until a 
sizeable pool is built and then backoff + self.requestInterval += self.requestInterval + + if self.requestInterval >= 1.days: + break ]# + +proc new*( + T: type WakuRendezVousClient, + switch: Switch, + peerManager: PeerManager, + clusterId: uint16, +): Result[T, string] {.raises: [].} = + # Create a minimal GenericRendezVous instance for client-side requests + # We don't need the full server functionality, just the request method + let rng = newRng() + let rdv = GenericRendezVous[WakuPeerRecord]( + switch: switch, + rng: rng, + sema: newAsyncSemaphore(MaxSimultanesousAdvertisements), + minDuration: rendezvous.MinimumAcceptedDuration, + maxDuration: rendezvous.MaximumDuration, + minTTL: rendezvous.MinimumAcceptedDuration.seconds.uint64, + maxTTL: rendezvous.MaximumDuration.seconds.uint64, + peers: @[], # Will be populated from selectPeer calls + cookiesSaved: initTable[PeerId, Table[string, seq[byte]]](), + peerRecordValidator: checkWakuPeerRecord, + ) + + # Set codec separately as it's inherited from LPProtocol + rdv.codec = WakuRendezVousCodec + + let client = T( + switch: switch, + peerManager: peerManager, + clusterId: clusterId, + requestInterval: RendezVousLookupInterval, + rdv: rdv, + ) + + info "waku rendezvous client initialized", clusterId = clusterId + + return ok(client) + +proc start*(self: WakuRendezVousClient) {.async: (raises: []).} = + self.periodicRequestFut = self.periodicRequests() + info "waku rendezvous client started" + +proc stopWait*(self: WakuRendezVousClient) {.async: (raises: []).} = + if not self.periodicRequestFut.isNil(): + await self.periodicRequestFut.cancelAndWait() + + info "waku rendezvous client stopped" diff --git a/waku/waku_rendezvous/common.nim b/waku/waku_rendezvous/common.nim index 6125ac860..18c633efb 100644 --- a/waku/waku_rendezvous/common.nim +++ b/waku/waku_rendezvous/common.nim @@ -11,6 +11,14 @@ const DefaultRequestsInterval* = 1.minutes const MaxRegistrationInterval* = 5.minutes const PeersRequestedCount* = 12 
+proc computeMixNamespace*(clusterId: uint16): string = + var namespace = "rs/" + + namespace &= $clusterId + namespace &= "/mix" + + return namespace + proc computeNamespace*(clusterId: uint16, shard: uint16): string = var namespace = "rs/" diff --git a/waku/waku_rendezvous/protocol.nim b/waku/waku_rendezvous/protocol.nim index 0eb55d350..00b5f1a5c 100644 --- a/waku/waku_rendezvous/protocol.nim +++ b/waku/waku_rendezvous/protocol.nim @@ -1,70 +1,90 @@ {.push raises: [].} import - std/[sugar, options], + std/[sugar, options, sequtils, tables], results, chronos, chronicles, - metrics, + stew/byteutils, libp2p/protocols/rendezvous, + libp2p/protocols/rendezvous/protobuf, + libp2p/utils/semaphore, + libp2p/utils/offsettedseq, + libp2p/crypto/curve25519, libp2p/switch, libp2p/utility +import metrics except collect + import ../node/peer_manager, ../common/callbacks, ../waku_enr/capabilities, ../waku_core/peers, - ../waku_core/topics, - ../waku_core/topics/pubsub_topic, - ./common + ../waku_core/codecs, + ./common, + ./waku_peer_record logScope: topics = "waku rendezvous" -declarePublicCounter rendezvousPeerFoundTotal, - "total number of peers found via rendezvous" - -type WakuRendezVous* = ref object - rendezvous: Rendezvous +type WakuRendezVous* = ref object of GenericRendezVous[WakuPeerRecord] peerManager: PeerManager clusterId: uint16 getShards: GetShards getCapabilities: GetCapabilities + getPeerRecord: GetWakuPeerRecord registrationInterval: timer.Duration periodicRegistrationFut: Future[void] - requestInterval: timer.Duration - periodicRequestFut: Future[void] +const MaximumNamespaceLen = 255 -proc batchAdvertise*( +method discover*( + self: WakuRendezVous, conn: Connection, d: Discover +) {.async: (raises: [CancelledError, LPStreamError]).} = + # Override discover method to avoid collect macro generic instantiation issues + trace "Received Discover", peerId = conn.peerId, ns = d.ns + await procCall GenericRendezVous[WakuPeerRecord](self).discover(conn, d) + +proc 
advertise*( self: WakuRendezVous, namespace: string, - ttl: Duration = DefaultRegistrationTTL, peers: seq[PeerId], + ttl: timer.Duration = self.minDuration, ): Future[Result[void, string]] {.async: (raises: []).} = - ## Register with all rendezvous peers under a namespace + trace "advertising via waku rendezvous", + namespace = namespace, ttl = ttl, peers = $peers, peerRecord = $self.getPeerRecord() + let se = SignedPayload[WakuPeerRecord].init( + self.switch.peerInfo.privateKey, self.getPeerRecord() + ).valueOr: + return + err("rendezvous advertisement failed: Failed to sign Waku Peer Record: " & $error) + let sprBuff = se.encode().valueOr: + return err("rendezvous advertisement failed: Wrong Signed Peer Record: " & $error) # rendezvous.advertise expects already opened connections # must dial first + var futs = collect(newSeq): for peerId in peers: - self.peerManager.dialPeer(peerId, RendezVousCodec) + self.peerManager.dialPeer(peerId, self.codec) let dialCatch = catch: await allFinished(futs) - futs = dialCatch.valueOr: - return err("batchAdvertise: " & error.msg) + if dialCatch.isErr(): + return err("advertise: " & dialCatch.error.msg) + + futs = dialCatch.get() let conns = collect(newSeq): for fut in futs: let catchable = catch: fut.read() - catchable.isOkOr: - warn "a rendezvous dial failed", cause = error.msg + if catchable.isErr(): + warn "a rendezvous dial failed", cause = catchable.error.msg continue let connOpt = catchable.get() @@ -74,149 +94,34 @@ proc batchAdvertise*( conn - let advertCatch = catch: - await self.rendezvous.advertise(namespace, Opt.some(ttl)) - - for conn in conns: - await conn.close() - - advertCatch.isOkOr: - return err("batchAdvertise: " & error.msg) + if conns.len == 0: + return err("could not establish any connections to rendezvous peers") + try: + await self.advertise(namespace, ttl, peers, sprBuff) + except Exception as e: + return err("rendezvous advertisement failed: " & e.msg) + finally: + for conn in conns: + await 
conn.close() return ok() -proc batchRequest*( - self: WakuRendezVous, - namespace: string, - count: int = DiscoverLimit, - peers: seq[PeerId], -): Future[Result[seq[PeerRecord], string]] {.async: (raises: []).} = - ## Request all records from all rendezvous peers matching a namespace - - # rendezvous.request expects already opened connections - # must dial first - var futs = collect(newSeq): - for peerId in peers: - self.peerManager.dialPeer(peerId, RendezVousCodec) - - let dialCatch = catch: - await allFinished(futs) - - futs = dialCatch.valueOr: - return err("batchRequest: " & error.msg) - - let conns = collect(newSeq): - for fut in futs: - let catchable = catch: - fut.read() - - catchable.isOkOr: - warn "a rendezvous dial failed", cause = error.msg - continue - - let connOpt = catchable.get() - - let conn = connOpt.valueOr: - continue - - conn - - let reqCatch = catch: - await self.rendezvous.request(Opt.some(namespace), Opt.some(count), Opt.some(peers)) - - for conn in conns: - await conn.close() - - reqCatch.isOkOr: - return err("batchRequest: " & error.msg) - - return ok(reqCatch.get()) - -proc advertiseAll( +proc advertiseAll*( self: WakuRendezVous ): Future[Result[void, string]] {.async: (raises: []).} = - info "waku rendezvous advertisements started" + trace "waku rendezvous advertisements started" - let shards = self.getShards() - - let futs = collect(newSeq): - for shardId in shards: - # Get a random RDV peer for that shard - - let pubsub = - toPubsubTopic(RelayShard(clusterId: self.clusterId, shardId: shardId)) - - let rpi = self.peerManager.selectPeer(RendezVousCodec, some(pubsub)).valueOr: - continue - - let namespace = computeNamespace(self.clusterId, shardId) - - # Advertise yourself on that peer - self.batchAdvertise(namespace, DefaultRegistrationTTL, @[rpi.peerId]) - - if futs.len < 1: + let rpi = self.peerManager.selectPeer(self.codec).valueOr: return err("could not get a peer supporting RendezVousCodec") - let catchable = catch: - await 
allFinished(futs) + let namespace = computeMixNamespace(self.clusterId) - catchable.isOkOr: - return err(error.msg) + # Advertise yourself on that peer + let res = await self.advertise(namespace, @[rpi.peerId]) - for fut in catchable.get(): - if fut.failed(): - warn "a rendezvous advertisement failed", cause = fut.error.msg + trace "waku rendezvous advertisements finished" - info "waku rendezvous advertisements finished" - - return ok() - -proc initialRequestAll*( - self: WakuRendezVous -): Future[Result[void, string]] {.async: (raises: []).} = - info "waku rendezvous initial requests started" - - let shards = self.getShards() - - let futs = collect(newSeq): - for shardId in shards: - let namespace = computeNamespace(self.clusterId, shardId) - # Get a random RDV peer for that shard - let rpi = self.peerManager.selectPeer( - RendezVousCodec, - some(toPubsubTopic(RelayShard(clusterId: self.clusterId, shardId: shardId))), - ).valueOr: - continue - - # Ask for peer records for that shard - self.batchRequest(namespace, PeersRequestedCount, @[rpi.peerId]) - - if futs.len < 1: - return err("could not get a peer supporting RendezVousCodec") - - let catchable = catch: - await allFinished(futs) - - catchable.isOkOr: - return err(error.msg) - - for fut in catchable.get(): - if fut.failed(): - warn "a rendezvous request failed", cause = fut.error.msg - elif fut.finished(): - let res = fut.value() - - let records = res.valueOr: - warn "a rendezvous request failed", cause = $error - continue - - for record in records: - rendezvousPeerFoundTotal.inc() - self.peerManager.addPeer(record) - - info "waku rendezvous initial request finished" - - return ok() + return res proc periodicRegistration(self: WakuRendezVous) {.async.} = info "waku rendezvous periodic registration started", @@ -237,22 +142,6 @@ proc periodicRegistration(self: WakuRendezVous) {.async.} = # Back to normal interval if no errors self.registrationInterval = DefaultRegistrationInterval -proc periodicRequests(self: 
WakuRendezVous) {.async.} = - info "waku rendezvous periodic requests started", interval = self.requestInterval - - # infinite loop - while true: - (await self.initialRequestAll()).isOkOr: - error "waku rendezvous requests failed", error = error - - await sleepAsync(self.requestInterval) - - # Exponential backoff - self.requestInterval += self.requestInterval - - if self.requestInterval >= 1.days: - break - proc new*( T: type WakuRendezVous, switch: Switch, @@ -260,46 +149,91 @@ proc new*( clusterId: uint16, getShards: GetShards, getCapabilities: GetCapabilities, + getPeerRecord: GetWakuPeerRecord, ): Result[T, string] {.raises: [].} = - let rvCatchable = catch: - RendezVous.new(switch = switch, minDuration = DefaultRegistrationTTL) + let rng = newRng() + let wrv = T( + rng: rng, + salt: string.fromBytes(generateBytes(rng[], 8)), + registered: initOffsettedSeq[RegisteredData](), + expiredDT: Moment.now() - 1.days, + sema: newAsyncSemaphore(SemaphoreDefaultSize), + minDuration: rendezvous.MinimumAcceptedDuration, + maxDuration: rendezvous.MaximumDuration, + minTTL: rendezvous.MinimumAcceptedDuration.seconds.uint64, + maxTTL: rendezvous.MaximumDuration.seconds.uint64, + peerRecordValidator: checkWakuPeerRecord, + ) - let rv = rvCatchable.valueOr: - return err(error.msg) - - let mountCatchable = catch: - switch.mount(rv) - - mountCatchable.isOkOr: - return err(error.msg) - - var wrv = WakuRendezVous() - wrv.rendezvous = rv wrv.peerManager = peerManager wrv.clusterId = clusterId wrv.getShards = getShards wrv.getCapabilities = getCapabilities wrv.registrationInterval = DefaultRegistrationInterval - wrv.requestInterval = DefaultRequestsInterval + wrv.getPeerRecord = getPeerRecord + wrv.switch = switch + wrv.codec = WakuRendezVousCodec + + proc handleStream( + conn: Connection, proto: string + ) {.async: (raises: [CancelledError]).} = + try: + let + buf = await conn.readLp(4096) + msg = Message.decode(buf).tryGet() + case msg.msgType + of MessageType.Register: + #TODO: 
override this to store peers registered with us in peerstore with their info as well. + await wrv.register(conn, msg.register.tryGet(), wrv.getPeerRecord()) + of MessageType.RegisterResponse: + trace "Got an unexpected Register Response", response = msg.registerResponse + of MessageType.Unregister: + wrv.unregister(conn, msg.unregister.tryGet()) + of MessageType.Discover: + await wrv.discover(conn, msg.discover.tryGet()) + of MessageType.DiscoverResponse: + trace "Got an unexpected Discover Response", response = msg.discoverResponse + except CancelledError as exc: + trace "cancelled rendezvous handler" + raise exc + except CatchableError as exc: + trace "exception in rendezvous handler", description = exc.msg + finally: + await conn.close() + + wrv.handler = handleStream info "waku rendezvous initialized", - clusterId = clusterId, shards = getShards(), capabilities = getCapabilities() + clusterId = clusterId, + shards = getShards(), + capabilities = getCapabilities(), + wakuPeerRecord = getPeerRecord() return ok(wrv) proc start*(self: WakuRendezVous) {.async: (raises: []).} = + # Start the parent GenericRendezVous (starts the register deletion loop) + if self.started: + warn "waku rendezvous already started" + return + try: + await procCall GenericRendezVous[WakuPeerRecord](self).start() + except CancelledError as exc: + error "failed to start GenericRendezVous", cause = exc.msg + return # start registering forever self.periodicRegistrationFut = self.periodicRegistration() - self.periodicRequestFut = self.periodicRequests() - info "waku rendezvous discovery started" proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} = if not self.periodicRegistrationFut.isNil(): await self.periodicRegistrationFut.cancelAndWait() - if not self.periodicRequestFut.isNil(): - await self.periodicRequestFut.cancelAndWait() + # Stop the parent GenericRendezVous (stops the register deletion loop) + await GenericRendezVous[WakuPeerRecord](self).stop() + + # Stop the parent 
GenericRendezVous (stops the register deletion loop) + await GenericRendezVous[WakuPeerRecord](self).stop() info "waku rendezvous discovery stopped" diff --git a/waku/waku_rendezvous/waku_peer_record.nim b/waku/waku_rendezvous/waku_peer_record.nim new file mode 100644 index 000000000..d6e700eb5 --- /dev/null +++ b/waku/waku_rendezvous/waku_peer_record.nim @@ -0,0 +1,74 @@ +import std/times, sugar + +import + libp2p/[ + protocols/rendezvous, + signed_envelope, + multicodec, + multiaddress, + protobuf/minprotobuf, + peerid, + ] + +type WakuPeerRecord* = object + # Considering only mix as of now, but we can keep extending this to include all capabilities part of Waku ENR + peerId*: PeerId + seqNo*: uint64 + addresses*: seq[MultiAddress] + mixKey*: string + +proc payloadDomain*(T: typedesc[WakuPeerRecord]): string = + $multiCodec("libp2p-custom-peer-record") + +proc payloadType*(T: typedesc[WakuPeerRecord]): seq[byte] = + @[(byte) 0x30, (byte) 0x00, (byte) 0x00] + +proc init*( + T: typedesc[WakuPeerRecord], + peerId: PeerId, + seqNo = getTime().toUnix().uint64, + addresses: seq[MultiAddress], + mixKey: string, +): T = + WakuPeerRecord(peerId: peerId, seqNo: seqNo, addresses: addresses, mixKey: mixKey) + +proc decode*( + T: typedesc[WakuPeerRecord], buffer: seq[byte] +): Result[WakuPeerRecord, ProtoError] = + let pb = initProtoBuffer(buffer) + var record = WakuPeerRecord() + + ?pb.getRequiredField(1, record.peerId) + ?pb.getRequiredField(2, record.seqNo) + discard ?pb.getRepeatedField(3, record.addresses) + + if record.addresses.len == 0: + return err(ProtoError.RequiredFieldMissing) + + ?pb.getRequiredField(4, record.mixKey) + + return ok(record) + +proc encode*(record: WakuPeerRecord): seq[byte] = + var pb = initProtoBuffer() + + pb.write(1, record.peerId) + pb.write(2, record.seqNo) + + for address in record.addresses: + pb.write(3, address) + + pb.write(4, record.mixKey) + + pb.finish() + return pb.buffer + +proc checkWakuPeerRecord*( + _: WakuPeerRecord, spr: 
seq[byte], peerId: PeerId +): Result[void, string] {.gcsafe.} = + if spr.len == 0: + return err("Empty peer record") + let signedEnv = ?SignedPayload[WakuPeerRecord].decode(spr).mapErr(x => $x) + if signedEnv.data.peerId != peerId: + return err("Bad Peer ID") + return ok() diff --git a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim index db68b2289..2ce7d4423 100644 --- a/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim +++ b/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -229,9 +229,20 @@ method register*( var gasPrice: int g.retryWrapper(gasPrice, "Failed to get gas price"): - int(await ethRpc.provider.eth_gasPrice()) * 2 + let fetchedGasPrice = uint64(await ethRpc.provider.eth_gasPrice()) + ## Multiply by 2 to speed up the transaction + ## Check for overflow when casting to int + if fetchedGasPrice > uint64(high(int) div 2): + warn "Gas price overflow detected, capping at maximum int value", + fetchedGasPrice = fetchedGasPrice, maxInt = high(int) + high(int) + else: + let calculatedGasPrice = int(fetchedGasPrice) * 2 + debug "Gas price calculated", + fetchedGasPrice = fetchedGasPrice, gasPrice = calculatedGasPrice + calculatedGasPrice let idCommitmentHex = identityCredential.idCommitment.inHex() - info "identityCredential idCommitmentHex", idCommitment = idCommitmentHex + debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex let idCommitment = identityCredential.idCommitment.toUInt256() let idCommitmentsToErase: seq[UInt256] = @[] info "registering the member", @@ -248,11 +259,10 @@ method register*( var tsReceipt: ReceiptObject g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): await ethRpc.getMinedTransactionReceipt(txHash) - info "registration transaction mined", txHash = txHash + debug "registration transaction mined", txHash = txHash g.registrationTxHash = some(txHash) # the receipt topic holds the hash of signature 
of the raised events - # TODO: make this robust. search within the event list for the event - info "ts receipt", receipt = tsReceipt[] + debug "ts receipt", receipt = tsReceipt[] if tsReceipt.status.isNone(): raise newException(ValueError, "Transaction failed: status is None") @@ -261,18 +271,27 @@ method register*( ValueError, "Transaction failed with status: " & $tsReceipt.status.get() ) - ## Extract MembershipRegistered event from transaction logs (third event) - let thirdTopic = tsReceipt.logs[2].topics[0] - info "third topic", thirdTopic = thirdTopic - if thirdTopic != - cast[FixedBytes[32]](keccak.keccak256.digest( - "MembershipRegistered(uint256,uint256,uint32)" - ).data): - raise newException(ValueError, "register: unexpected event signature") + ## Search through all transaction logs to find the MembershipRegistered event + let expectedEventSignature = cast[FixedBytes[32]](keccak.keccak256.digest( + "MembershipRegistered(uint256,uint256,uint32)" + ).data) - ## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32) - let arguments = tsReceipt.logs[2].data - info "tx log data", arguments = arguments + var membershipRegisteredLog: Option[LogObject] + for log in tsReceipt.logs: + if log.topics.len > 0 and log.topics[0] == expectedEventSignature: + membershipRegisteredLog = some(log) + break + + if membershipRegisteredLog.isNone(): + raise newException( + ValueError, "register: MembershipRegistered event not found in transaction logs" + ) + + let registrationLog = membershipRegisteredLog.get() + + ## Parse MembershipRegistered event data: idCommitment(256) || membershipRateLimit(256) || index(32) + let arguments = registrationLog.data + trace "registration transaction log data", arguments = arguments let ## Extract membership index from transaction log data (big endian) membershipIndex = UInt256.fromBytesBE(arguments[64 .. 
95]) @@ -360,7 +379,7 @@ method generateProof*( let x = keccak.keccak256.digest(data) - let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr: + let extNullifier = generateExternalNullifier(epoch, rlnIdentifier).valueOr: return err("Failed to compute external nullifier: " & error) let witness = RLNWitnessInput( @@ -438,10 +457,9 @@ method verifyProof*( var normalizedProof = proof - normalizedProof.externalNullifier = poseidon( - @[@(proof.epoch), @(proof.rlnIdentifier)] - ).valueOr: + let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr: return err("Failed to compute external nullifier: " & error) + normalizedProof.externalNullifier = externalNullifier let proofBytes = serialize(normalizedProof, input) let proofBuffer = proofBytes.toBuffer() diff --git a/waku/waku_rln_relay/rln/wrappers.nim b/waku/waku_rln_relay/rln/wrappers.nim index d1dec2b38..1b2b0270f 100644 --- a/waku/waku_rln_relay/rln/wrappers.nim +++ b/waku/waku_rln_relay/rln/wrappers.nim @@ -6,7 +6,8 @@ import stew/[arrayops, byteutils, endians2], stint, results, - std/[sequtils, strutils, tables] + std/[sequtils, strutils, tables], + nimcrypto/keccak as keccak import ./rln_interface, ../conversion_utils, ../protocol_types, ../protocol_metrics import ../../waku_core, ../../waku_keystore @@ -119,24 +120,6 @@ proc createRLNInstance*(): RLNResult = res = createRLNInstanceLocal() return res -proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] = - ## a thin layer on top of the Nim wrapper of the sha256 hasher - var lenPrefData = encodeLengthPrefix(data) - var - hashInputBuffer = lenPrefData.toBuffer() - outputBuffer: Buffer # will holds the hash output - - trace "sha256 hash input buffer length", bufflen = hashInputBuffer.len - let hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer, true) - - # check whether the hash call is done successfully - if not hashSuccess: - return err("error in sha256 hash") - - let output = cast[ptr 
MerkleNode](outputBuffer.`ptr`)[] - - return ok(output) - proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] = ## a thin layer on top of the Nim wrapper of the poseidon hasher var inputBytes = serialize(data) @@ -180,9 +163,18 @@ proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byt leaves.add(leaf) return ok(leaves) +proc generateExternalNullifier*( + epoch: Epoch, rlnIdentifier: RlnIdentifier +): RlnRelayResult[ExternalNullifier] = + let epochHash = keccak.keccak256.digest(@(epoch)) + let rlnIdentifierHash = keccak.keccak256.digest(@(rlnIdentifier)) + let externalNullifier = poseidon(@[@(epochHash), @(rlnIdentifierHash)]).valueOr: + return err("Failed to compute external nullifier: " & error) + return ok(externalNullifier) + proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] = - let externalNullifier = poseidon(@[@(proof.epoch), @(proof.rlnIdentifier)]).valueOr: - return err("could not construct the external nullifier") + let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr: + return err("Failed to compute external nullifier: " & error) return ok( ProofMetadata( nullifier: proof.nullifier, diff --git a/waku/waku_rln_relay/rln_relay.nim b/waku/waku_rln_relay/rln_relay.nim index 6a8fea2b5..5c893e2a2 100644 --- a/waku/waku_rln_relay/rln_relay.nim +++ b/waku/waku_rln_relay/rln_relay.nim @@ -24,10 +24,14 @@ import ./nonce_manager import - ../common/error_handling, - ../waku_relay, # for WakuRelayHandler - ../waku_core, - ../waku_keystore + waku/[ + common/error_handling, + waku_relay, # for WakuRelayHandler + waku_core, + requests/rln_requests, + waku_keystore, + common/broker/broker_context, + ] logScope: topics = "waku rln_relay" @@ -65,6 +69,7 @@ type WakuRLNRelay* = ref object of RootObj nonceManager*: NonceManager epochMonitorFuture*: Future[void] rootChangesFuture*: Future[void] + brokerCtx*: BrokerContext proc calcEpoch*(rlnPeer: WakuRLNRelay, t: 
float64): Epoch = ## gets time `t` as `flaot64` with subseconds resolution in the fractional part @@ -91,6 +96,7 @@ proc stop*(rlnPeer: WakuRLNRelay) {.async: (raises: [Exception]).} = # stop the group sync, and flush data to tree db info "stopping rln-relay" + RequestGenerateRlnProof.clearProvider(rlnPeer.brokerCtx) await rlnPeer.groupManager.stop() proc hasDuplicate*( @@ -275,11 +281,11 @@ proc validateMessageAndUpdateLog*( return isValidMessage -proc appendRLNProof*( - rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64 -): RlnRelayResult[void] = - ## returns true if it can create and append a `RateLimitProof` to the supplied `msg` - ## returns false otherwise +proc createRlnProof( + rlnPeer: WakuRLNRelay, msg: WakuMessage, senderEpochTime: float64 +): RlnRelayResult[seq[byte]] = + ## returns a new `RateLimitProof` for the supplied `msg` + ## returns an error if it cannot create the proof ## `senderEpochTime` indicates the number of seconds passed since Unix epoch. The fractional part holds sub-seconds. 
## The `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`) @@ -291,7 +297,14 @@ proc appendRLNProof*( let proof = rlnPeer.groupManager.generateProof(input, epoch, nonce).valueOr: return err("could not generate rln-v2 proof: " & $error) - msg.proof = proof.encode().buffer + return ok(proof.encode().buffer) + +proc appendRLNProof*( + rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64 +): RlnRelayResult[void] = + msg.proof = rlnPeer.createRlnProof(msg, senderEpochTime).valueOr: + return err($error) + return ok() proc clearNullifierLog*(rlnPeer: WakuRlnRelay) = @@ -333,7 +346,7 @@ proc generateRlnValidator*( let validationRes = wakuRlnRelay.validateMessageAndUpdateLog(message) let - proof = toHex(msgProof.proof) + proof = byteutils.toHex(msgProof.proof) epoch = fromEpoch(msgProof.epoch) root = inHex(msgProof.merkleRoot) shareX = inHex(msgProof.shareX) @@ -429,6 +442,7 @@ proc mount( rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1), rlnMaxTimestampGap: uint64(MaxClockGapSeconds), onFatalErrorAction: conf.onFatalErrorAction, + brokerCtx: globalBrokerContext(), ) # track root changes on smart contract merkle tree @@ -438,6 +452,19 @@ proc mount( # Start epoch monitoring in the background wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay) + + RequestGenerateRlnProof.setProvider( + wakuRlnRelay.brokerCtx, + proc( + msg: WakuMessage, senderEpochTime: float64 + ): Future[Result[RequestGenerateRlnProof, string]] {.async.} = + let proof = createRlnProof(wakuRlnRelay, msg, senderEpochTime).valueOr: + return err("Could not create RLN proof: " & $error) + + return ok(RequestGenerateRlnProof(proof: proof)), + ).isOkOr: + return err("Proof generator provider cannot be set: " & $error) + return ok(wakuRlnRelay) proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} = diff --git a/waku/waku_store/client.nim b/waku/waku_store/client.nim index 
308d7f98e..5b261af47 100644 --- a/waku/waku_store/client.nim +++ b/waku/waku_store/client.nim @@ -1,6 +1,12 @@ {.push raises: [].} -import std/[options, tables], results, chronicles, chronos, metrics, bearssl/rand +import + std/[options, tables, sequtils, algorithm], + results, + chronicles, + chronos, + metrics, + bearssl/rand import ../node/peer_manager, ../utils/requests, ./protocol_metrics, ./common, ./rpc_codec @@ -10,6 +16,8 @@ logScope: const DefaultPageSize*: uint = 20 # A recommended default number of waku messages per page +const MaxQueryRetries = 5 # Maximum number of store peers to try before giving up + type WakuStoreClient* = ref object peerManager: PeerManager rng: ref rand.HmacDrbgContext @@ -79,18 +87,33 @@ proc query*( proc queryToAny*( self: WakuStoreClient, request: StoreQueryRequest, peerId = none(PeerId) ): Future[StoreQueryResult] {.async.} = - ## This proc is similar to the query one but in this case - ## we don't specify a particular peer and instead we get it from peer manager + ## we don't specify a particular peer and instead we get it from peer manager. + ## It will retry with different store peers if the dial fails. 
if request.paginationCursor.isSome() and request.paginationCursor.get() == EmptyCursor: return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor")) - let peer = self.peerManager.selectPeer(WakuStoreCodec).valueOr: + # Get all available store peers + var peers = self.peerManager.switch.peerStore.getPeersByProtocol(WakuStoreCodec) + if peers.len == 0: return err(StoreError(kind: BAD_RESPONSE, cause: "no service store peer connected")) - let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: - waku_store_errors.inc(labelValues = [DialFailure]) + # Shuffle to distribute load and limit retries + let peersToTry = peers[0 ..< min(peers.len, MaxQueryRetries)] - return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) + var lastError: StoreError + for peer in peersToTry: + let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: + waku_store_errors.inc(labelValues = [DialFailure]) + warn "failed to dial store peer, trying next" + lastError = StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer) + continue - return await self.sendStoreRequest(request, connection) + let response = (await self.sendStoreRequest(request, connection)).valueOr: + warn "store query failed, trying next peer", peerId = peer.peerId, error = $error + lastError = error + continue + + return ok(response) + + return err(lastError) diff --git a/waku/waku_store_sync/reconciliation.nim b/waku/waku_store_sync/reconciliation.nim index 0cc15d0df..23f513322 100644 --- a/waku/waku_store_sync/reconciliation.nim +++ b/waku/waku_store_sync/reconciliation.nim @@ -79,7 +79,8 @@ proc messageIngress*( let id = SyncID(time: msg.timestamp, hash: msgHash) self.storage.insert(id, pubsubTopic, msg.contentTopic).isOkOr: - error "failed to insert new message", msg_hash = $id.hash.toHex(), error = $error + error "failed to insert new message", + msg_hash = byteutils.toHex(id.hash), error = $error proc messageIngress*( self: 
SyncReconciliation, @@ -87,7 +88,7 @@ proc messageIngress*( pubsubTopic: PubsubTopic, msg: WakuMessage, ) = - trace "message ingress", msg_hash = msgHash.toHex(), msg = msg + trace "message ingress", msg_hash = byteutils.toHex(msgHash), msg = msg if msg.ephemeral: return @@ -95,7 +96,8 @@ proc messageIngress*( let id = SyncID(time: msg.timestamp, hash: msgHash) self.storage.insert(id, pubsubTopic, msg.contentTopic).isOkOr: - error "failed to insert new message", msg_hash = $id.hash.toHex(), error = $error + error "failed to insert new message", + msg_hash = byteutils.toHex(id.hash), error = $error proc messageIngress*( self: SyncReconciliation, @@ -104,7 +106,8 @@ proc messageIngress*( contentTopic: ContentTopic, ) = self.storage.insert(id, pubsubTopic, contentTopic).isOkOr: - error "failed to insert new message", msg_hash = $id.hash.toHex(), error = $error + error "failed to insert new message", + msg_hash = byteutils.toHex(id.hash), error = $error proc preProcessPayload( self: SyncReconciliation, payload: RangesData